-rw-r--r--  .buildkite/pipeline.yaml | 6
-rw-r--r--  .github/workflows/build.yml | 4
-rw-r--r--  .github/workflows/go.yml | 4
-rw-r--r--  BUILD | 1
-rw-r--r--  Makefile | 1
-rw-r--r--  WORKSPACE | 4
-rw-r--r--  go.mod | 2
-rw-r--r--  go.sum | 2
-rw-r--r--  nogo.yaml | 47
-rw-r--r--  pkg/abi/linux/ptrace_amd64.go | 11
-rw-r--r--  pkg/abi/linux/ptrace_arm64.go | 11
-rw-r--r--  pkg/log/BUILD | 1
-rw-r--r--  pkg/log/log.go | 3
-rw-r--r--  pkg/sentry/fs/copy_up.go | 11
-rw-r--r--  pkg/sentry/fs/gofer/inode_state.go | 1
-rw-r--r--  pkg/sentry/fs/proc/sys_net.go | 121
-rw-r--r--  pkg/sentry/fsimpl/devpts/devpts.go | 5
-rw-r--r--  pkg/sentry/fsimpl/ext/filesystem.go | 5
-rw-r--r--  pkg/sentry/fsimpl/fuse/fusefs.go | 128
-rw-r--r--  pkg/sentry/fsimpl/gofer/filesystem.go | 57
-rw-r--r--  pkg/sentry/fsimpl/gofer/gofer.go | 110
-rw-r--r--  pkg/sentry/fsimpl/host/host.go | 5
-rw-r--r--  pkg/sentry/fsimpl/kernfs/kernfs_test.go | 5
-rw-r--r--  pkg/sentry/fsimpl/overlay/filesystem.go | 15
-rw-r--r--  pkg/sentry/fsimpl/pipefs/pipefs.go | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/filesystem.go | 5
-rw-r--r--  pkg/sentry/fsimpl/proc/tasks_sys.go | 78
-rw-r--r--  pkg/sentry/fsimpl/sockfs/sockfs.go | 5
-rw-r--r--  pkg/sentry/fsimpl/sys/sys.go | 5
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/filesystem.go | 5
-rw-r--r--  pkg/sentry/fsimpl/tmpfs/tmpfs.go | 5
-rw-r--r--  pkg/sentry/fsimpl/verity/filesystem.go | 17
-rw-r--r--  pkg/sentry/fsimpl/verity/verity.go | 68
-rw-r--r--  pkg/sentry/inet/inet.go | 8
-rw-r--r--  pkg/sentry/inet/test_stack.go | 12
-rw-r--r--  pkg/sentry/pgalloc/pgalloc.go | 120
-rw-r--r--  pkg/sentry/socket/hostinet/stack.go | 11
-rw-r--r--  pkg/sentry/socket/netstack/netstack.go | 161
-rw-r--r--  pkg/sentry/socket/netstack/stack.go | 14
-rw-r--r--  pkg/sentry/vfs/anonfs.go | 5
-rw-r--r--  pkg/sentry/vfs/dentry.go | 8
-rw-r--r--  pkg/sentry/vfs/filesystem.go | 9
-rw-r--r--  pkg/sentry/vfs/mount.go | 25
-rw-r--r--  pkg/sync/mutex_unsafe.go | 1
-rw-r--r--  pkg/sync/rwmutex_unsafe.go | 6
-rw-r--r--  pkg/syserr/netstack.go | 3
-rw-r--r--  pkg/tcpip/buffer/view.go | 10
-rw-r--r--  pkg/tcpip/buffer/view_test.go | 46
-rw-r--r--  pkg/tcpip/checker/checker.go | 18
-rw-r--r--  pkg/tcpip/errors.go | 13
-rw-r--r--  pkg/tcpip/header/checksum.go | 53
-rw-r--r--  pkg/tcpip/header/checksum_test.go | 113
-rw-r--r--  pkg/tcpip/header/icmpv4.go | 5
-rw-r--r--  pkg/tcpip/header/icmpv6.go | 17
-rw-r--r--  pkg/tcpip/header/parse/parse.go | 30
-rw-r--r--  pkg/tcpip/header/tcp.go | 28
-rw-r--r--  pkg/tcpip/header/tcp_test.go | 20
-rw-r--r--  pkg/tcpip/link/fdbased/endpoint.go | 4
-rw-r--r--  pkg/tcpip/link/fdbased/endpoint_test.go | 6
-rw-r--r--  pkg/tcpip/link/fdbased/packet_dispatchers.go | 4
-rw-r--r--  pkg/tcpip/link/sharedmem/sharedmem_test.go | 6
-rw-r--r--  pkg/tcpip/link/sniffer/sniffer.go | 14
-rw-r--r--  pkg/tcpip/link/tun/device.go | 2
-rw-r--r--  pkg/tcpip/network/arp/arp.go | 2
-rw-r--r--  pkg/tcpip/network/internal/fragmentation/fragmentation.go | 6
-rw-r--r--  pkg/tcpip/network/internal/fragmentation/fragmentation_test.go | 16
-rw-r--r--  pkg/tcpip/network/internal/fragmentation/reassembler.go | 4
-rw-r--r--  pkg/tcpip/network/internal/fragmentation/reassembler_test.go | 2
-rw-r--r--  pkg/tcpip/network/internal/ip/duplicate_address_detection.go | 17
-rw-r--r--  pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go | 22
-rw-r--r--  pkg/tcpip/network/internal/ip/stats.go | 2
-rw-r--r--  pkg/tcpip/network/ip_test.go | 21
-rw-r--r--  pkg/tcpip/network/ipv4/BUILD | 2
-rw-r--r--  pkg/tcpip/network/ipv4/icmp.go | 27
-rw-r--r--  pkg/tcpip/network/ipv4/igmp.go | 4
-rw-r--r--  pkg/tcpip/network/ipv4/igmp_test.go | 4
-rw-r--r--  pkg/tcpip/network/ipv4/ipv4.go | 23
-rw-r--r--  pkg/tcpip/network/ipv4/ipv4_test.go | 121
-rw-r--r--  pkg/tcpip/network/ipv4/stats.go | 4
-rw-r--r--  pkg/tcpip/network/ipv6/icmp.go | 140
-rw-r--r--  pkg/tcpip/network/ipv6/icmp_test.go | 52
-rw-r--r--  pkg/tcpip/network/ipv6/ipv6.go | 66
-rw-r--r--  pkg/tcpip/network/ipv6/ipv6_test.go | 14
-rw-r--r--  pkg/tcpip/network/ipv6/mld.go | 6
-rw-r--r--  pkg/tcpip/network/ipv6/mld_test.go | 10
-rw-r--r--  pkg/tcpip/network/ipv6/ndp.go | 68
-rw-r--r--  pkg/tcpip/network/ipv6/ndp_test.go | 50
-rw-r--r--  pkg/tcpip/network/multicast_group_test.go | 14
-rw-r--r--  pkg/tcpip/ports/BUILD | 5
-rw-r--r--  pkg/tcpip/ports/flags.go | 150
-rw-r--r--  pkg/tcpip/ports/ports.go | 619
-rw-r--r--  pkg/tcpip/ports/ports_test.go | 59
-rw-r--r--  pkg/tcpip/stack/conntrack.go | 4
-rw-r--r--  pkg/tcpip/stack/iptables_targets.go | 2
-rw-r--r--  pkg/tcpip/stack/ndp_test.go | 123
-rw-r--r--  pkg/tcpip/stack/neighbor_cache_test.go | 1064
-rw-r--r--  pkg/tcpip/stack/neighbor_entry_test.go | 19
-rw-r--r--  pkg/tcpip/stack/nic.go | 6
-rw-r--r--  pkg/tcpip/stack/packet_buffer.go | 248
-rw-r--r--  pkg/tcpip/stack/packet_buffer_test.go | 224
-rw-r--r--  pkg/tcpip/stack/registration.go | 44
-rw-r--r--  pkg/tcpip/stack/stack.go | 12
-rw-r--r--  pkg/tcpip/stack/stack_test.go | 10
-rw-r--r--  pkg/tcpip/stack/transport_demuxer.go | 6
-rw-r--r--  pkg/tcpip/tcpip.go | 171
-rw-r--r--  pkg/tcpip/tests/integration/forward_test.go | 35
-rw-r--r--  pkg/tcpip/tests/integration/link_resolution_test.go | 78
-rw-r--r--  pkg/tcpip/tests/integration/loopback_test.go | 2
-rw-r--r--  pkg/tcpip/tests/integration/multicast_broadcast_test.go | 6
-rw-r--r--  pkg/tcpip/transport/icmp/endpoint.go | 46
-rw-r--r--  pkg/tcpip/transport/packet/endpoint.go | 6
-rw-r--r--  pkg/tcpip/transport/raw/endpoint.go | 2
-rw-r--r--  pkg/tcpip/transport/tcp/BUILD | 1
-rw-r--r--  pkg/tcpip/transport/tcp/accept.go | 20
-rw-r--r--  pkg/tcpip/transport/tcp/connect.go | 12
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint.go | 76
-rw-r--r--  pkg/tcpip/transport/tcp/endpoint_state.go | 13
-rw-r--r--  pkg/tcpip/transport/tcp/protocol.go | 2
-rw-r--r--  pkg/tcpip/transport/tcp/segment.go | 8
-rw-r--r--  pkg/tcpip/transport/tcp/snd.go | 2
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_test.go | 70
-rw-r--r--  pkg/tcpip/transport/tcp/tcp_timestamp_test.go | 4
-rw-r--r--  pkg/tcpip/transport/tcp/testing/context/context.go | 12
-rw-r--r--  pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go | 2
-rw-r--r--  pkg/tcpip/transport/udp/endpoint.go | 54
-rw-r--r--  pkg/tcpip/transport/udp/protocol.go | 2
-rw-r--r--  runsc/boot/compat.go | 12
-rw-r--r--  runsc/boot/compat_amd64.go | 4
-rw-r--r--  runsc/boot/controller.go | 4
-rw-r--r--  runsc/boot/filter/config.go | 379
-rw-r--r--  runsc/boot/filter/config_amd64.go | 33
-rw-r--r--  runsc/boot/filter/config_arm64.go | 17
-rw-r--r--  runsc/boot/filter/config_profile.go | 7
-rw-r--r--  runsc/boot/filter/extra_filters_msan.go | 11
-rw-r--r--  runsc/boot/filter/extra_filters_race.go | 25
-rw-r--r--  runsc/boot/fs.go | 12
-rw-r--r--  runsc/boot/limits.go | 8
-rw-r--r--  runsc/boot/loader_test.go | 7
-rw-r--r--  runsc/boot/network.go | 4
-rw-r--r--  runsc/cgroup/cgroup.go | 9
-rw-r--r--  runsc/cli/BUILD | 1
-rw-r--r--  runsc/cli/main.go | 8
-rw-r--r--  runsc/cmd/BUILD | 3
-rw-r--r--  runsc/cmd/boot.go | 5
-rw-r--r--  runsc/cmd/checkpoint.go | 4
-rw-r--r--  runsc/cmd/chroot.go | 16
-rw-r--r--  runsc/cmd/cmd.go | 10
-rw-r--r--  runsc/cmd/debug.go | 6
-rw-r--r--  runsc/cmd/do.go | 6
-rw-r--r--  runsc/cmd/exec.go | 12
-rw-r--r--  runsc/cmd/gofer.go | 43
-rw-r--r--  runsc/cmd/kill.go | 7
-rw-r--r--  runsc/cmd/mitigate.go | 122
-rw-r--r--  runsc/cmd/mitigate_test.go | 169
-rw-r--r--  runsc/cmd/restore.go | 4
-rw-r--r--  runsc/cmd/run.go | 4
-rw-r--r--  runsc/cmd/wait.go | 6
-rw-r--r--  runsc/config/config.go | 3
-rw-r--r--  runsc/config/flags.go | 1
-rw-r--r--  runsc/container/BUILD | 1
-rw-r--r--  runsc/container/console_test.go | 15
-rw-r--r--  runsc/container/container.go | 35
-rw-r--r--  runsc/container/container_test.go | 50
-rw-r--r--  runsc/container/multi_container_test.go | 34
-rw-r--r--  runsc/container/state_file.go | 6
-rw-r--r--  runsc/fsgofer/filter/config.go | 153
-rw-r--r--  runsc/fsgofer/filter/config_amd64.go | 35
-rw-r--r--  runsc/fsgofer/filter/config_arm64.go | 19
-rw-r--r--  runsc/fsgofer/filter/extra_filters_msan.go | 7
-rw-r--r--  runsc/fsgofer/filter/extra_filters_race.go | 27
-rw-r--r--  runsc/fsgofer/fsgofer.go | 21
-rw-r--r--  runsc/fsgofer/fsgofer_test.go | 32
-rw-r--r--  runsc/mitigate/BUILD | 22
-rw-r--r--  runsc/mitigate/cpu.go | 423
-rw-r--r--  runsc/mitigate/cpu_test.go | 605
-rw-r--r--  runsc/mitigate/mitigate.go | 467
-rw-r--r--  runsc/mitigate/mitigate_test.go | 579
-rw-r--r--  runsc/mitigate/mock/BUILD | 11
-rw-r--r--  runsc/mitigate/mock/mock.go | 141
-rw-r--r--  runsc/sandbox/network.go | 25
-rw-r--r--  runsc/sandbox/network_unsafe.go | 3
-rw-r--r--  runsc/sandbox/sandbox.go | 35
-rw-r--r--  runsc/specutils/fs.go | 78
-rw-r--r--  runsc/specutils/namespace.go | 12
-rw-r--r--  runsc/specutils/seccomp/BUILD | 2
-rw-r--r--  runsc/specutils/seccomp/seccomp.go | 6
-rw-r--r--  runsc/specutils/seccomp/seccomp_test.go | 40
-rw-r--r--  runsc/specutils/specutils.go | 14
-rw-r--r--  test/benchmarks/fs/BUILD | 1
-rw-r--r--  test/benchmarks/fs/bazel_test.go | 61
-rw-r--r--  test/benchmarks/fs/fio_test.go | 59
-rw-r--r--  test/benchmarks/harness/BUILD | 1
-rw-r--r--  test/benchmarks/harness/util.go | 52
-rw-r--r--  test/benchmarks/tcp/tcp_proxy.go | 17
-rw-r--r--  test/fuse/linux/mount_test.cc | 5
-rw-r--r--  test/iptables/BUILD | 1
-rw-r--r--  test/iptables/iptables_unsafe.go | 31
-rw-r--r--  test/iptables/nat.go | 90
-rw-r--r--  test/packetimpact/runner/dut.go | 81
-rw-r--r--  test/packetimpact/testbench/BUILD | 2
-rw-r--r--  test/packetimpact/testbench/connections.go | 12
-rw-r--r--  test/packetimpact/testbench/dut.go | 44
-rw-r--r--  test/packetimpact/testbench/layers.go | 22
-rw-r--r--  test/packetimpact/testbench/layers_test.go | 8
-rw-r--r--  test/packetimpact/testbench/testbench.go | 62
-rw-r--r--  test/packetimpact/tests/fin_wait2_timeout_test.go | 10
-rw-r--r--  test/packetimpact/tests/ipv4_fragment_reassembly_test.go | 6
-rw-r--r--  test/packetimpact/tests/ipv4_id_uniqueness_test.go | 2
-rw-r--r--  test/packetimpact/tests/ipv6_fragment_icmp_error_test.go | 14
-rw-r--r--  test/packetimpact/tests/ipv6_fragment_reassembly_test.go | 14
-rw-r--r--  test/packetimpact/tests/tcp_cork_mss_test.go | 12
-rw-r--r--  test/packetimpact/tests/tcp_handshake_window_size_test.go | 8
-rw-r--r--  test/packetimpact/tests/tcp_info_test.go | 2
-rw-r--r--  test/packetimpact/tests/tcp_linger_test.go | 31
-rw-r--r--  test/packetimpact/tests/tcp_network_unreachable_test.go | 11
-rw-r--r--  test/packetimpact/tests/tcp_noaccept_close_rst_test.go | 2
-rw-r--r--  test/packetimpact/tests/tcp_outside_the_window_test.go | 10
-rw-r--r--  test/packetimpact/tests/tcp_paws_mechanism_test.go | 14
-rw-r--r--  test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go | 53
-rw-r--r--  test/packetimpact/tests/tcp_rack_test.go | 20
-rw-r--r--  test/packetimpact/tests/tcp_rcv_buf_space_test.go | 5
-rw-r--r--  test/packetimpact/tests/tcp_retransmits_test.go | 2
-rw-r--r--  test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go | 4
-rw-r--r--  test/packetimpact/tests/tcp_synrcvd_reset_test.go | 10
-rw-r--r--  test/packetimpact/tests/tcp_synsent_reset_test.go | 18
-rw-r--r--  test/packetimpact/tests/tcp_timewait_reset_test.go | 14
-rw-r--r--  test/packetimpact/tests/tcp_unacc_seq_ack_test.go | 37
-rw-r--r--  test/packetimpact/tests/tcp_user_timeout_test.go | 6
-rw-r--r--  test/packetimpact/tests/tcp_window_shrink_test.go | 4
-rw-r--r--  test/packetimpact/tests/tcp_zero_receive_window_test.go | 8
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go | 22
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_test.go | 12
-rw-r--r--  test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go | 12
-rw-r--r--  test/packetimpact/tests/udp_discard_mcast_source_addr_test.go | 5
-rw-r--r--  test/packetimpact/tests/udp_icmp_error_propagation_test.go | 21
-rw-r--r--  test/packetimpact/tests/udp_send_recv_dgram_test.go | 3
-rw-r--r--  test/perf/BUILD | 2
-rw-r--r--  test/runner/gtest/gtest.go | 50
-rw-r--r--  test/runner/runner.go | 226
-rw-r--r--  test/runtimes/proctor/BUILD | 5
-rw-r--r--  test/runtimes/proctor/lib/BUILD | 1
-rw-r--r--  test/runtimes/proctor/lib/lib.go | 7
-rw-r--r--  test/runtimes/proctor/main.go | 8
-rw-r--r--  test/syscalls/BUILD | 6
-rw-r--r--  test/syscalls/linux/BUILD | 6
-rw-r--r--  test/syscalls/linux/proc.cc | 23
-rw-r--r--  test/syscalls/linux/proc_net.cc | 37
-rw-r--r--  test/syscalls/linux/pty.cc | 7
-rw-r--r--  test/syscalls/linux/socket_generic_stress.cc | 136
-rw-r--r--  test/syscalls/linux/socket_inet_loopback.cc | 6
-rw-r--r--  test/uds/BUILD | 1
-rw-r--r--  test/uds/uds.go | 24
-rw-r--r--  tools/checklocks/BUILD | 16
-rw-r--r--  tools/checklocks/README.md | 142
-rw-r--r--  tools/checklocks/checklocks.go | 761
-rw-r--r--  tools/checklocks/test/BUILD | 8
-rw-r--r--  tools/checklocks/test/test.go | 362
-rw-r--r--  tools/go_marshal/gomarshal/generator.go | 8
-rw-r--r--  tools/go_marshal/gomarshal/generator_tests.go | 12
-rw-r--r--  tools/nogo/BUILD | 1
-rw-r--r--  tools/nogo/analyzers.go | 2
-rw-r--r--  tools/verity/BUILD | 15
-rw-r--r--  tools/verity/measure_tool.go | 87
-rw-r--r--  tools/verity/measure_tool_unsafe.go (renamed from runsc/mitigate/mitigate_conf.go) | 40
264 files changed, 7808 insertions, 4433 deletions
diff --git a/.buildkite/pipeline.yaml b/.buildkite/pipeline.yaml
index cb272aef6..aa2fd1f47 100644
--- a/.buildkite/pipeline.yaml
+++ b/.buildkite/pipeline.yaml
@@ -183,9 +183,13 @@ steps:
- <<: *benchmarks
label: ":metal: FFMPEG benchmarks"
command: make benchmark-platforms BENCHMARKS_SUITE=ffmpeg BENCHMARKS_TARGETS=test/benchmarks/media:ffmpeg_test
+ # For fio, running with --test.benchtime=Xs scales the written/read
+ # bytes to several GB. This is not a problem for root/bind/volume mounts,
+ # but for tmpfs mounts, the size can grow to more memory than the machine
+ # has available. Fix the runs to 10GB written/read for the benchmark.
- <<: *benchmarks
label: ":floppy_disk: FIO benchmarks"
- command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test
+ command: make benchmark-platforms BENCHMARKS_SUITE=fio BENCHMARKS_TARGETS=test/benchmarks/fs:fio_test BENCHMARKS_OPTIONS=--test.benchtime=10000x
- <<: *benchmarks
label: ":globe_with_meridians: HTTPD benchmarks"
command: make benchmark-platforms BENCHMARKS_FILTER="Continuous" BENCHMARKS_SUITE=httpd BENCHMARKS_TARGETS=test/benchmarks/network:httpd_test
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index b0381a563..b572dc94f 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -16,6 +16,10 @@ jobs:
default:
runs-on: ubuntu-latest
steps:
+ - name: Cancel previous
+ uses: styfle/cancel-workflow-action@0.7.0
+ with:
+ access_token: ${{ github.token }}
- uses: actions/checkout@v2
- run: make
- run: make build OPTIONS="--build_tag_filters nogo" TARGETS="//..."
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
index 594dc7ffc..4c8b8ea5c 100644
--- a/.github/workflows/go.yml
+++ b/.github/workflows/go.yml
@@ -16,6 +16,10 @@ jobs:
generate:
runs-on: ubuntu-latest
steps:
+ - name: Cancel previous
+ uses: styfle/cancel-workflow-action@0.7.0
+ with:
+ access_token: ${{ github.token }}
- id: setup
run: |
if ! [[ -z "${{ secrets.GO_TOKEN }}" ]]; then
diff --git a/BUILD b/BUILD
index 49cfb7c8e..6cbd2c413 100644
--- a/BUILD
+++ b/BUILD
@@ -122,6 +122,7 @@ go_path(
# Packages that are not dependencies of the above.
"//pkg/sentry/kernel/memevent",
"//pkg/tcpip/adapters/gonet",
+ "//pkg/tcpip/faketime",
"//pkg/tcpip/link/channel",
"//pkg/tcpip/link/ethernet",
"//pkg/tcpip/link/muxed",
diff --git a/Makefile b/Makefile
index de22509cd..e424308fa 100644
--- a/Makefile
+++ b/Makefile
@@ -143,6 +143,7 @@ dev: $(RUNTIME_BIN) ## Installs a set of local runtimes. Requires sudo.
@$(call configure_noreload,$(RUNTIME)-d,--net-raw --debug --strace --log-packets)
@$(call configure_noreload,$(RUNTIME)-p,--net-raw --profile)
@$(call configure_noreload,$(RUNTIME)-vfs2-d,--net-raw --debug --strace --log-packets --vfs2)
+ @$(call configure_noreload,$(RUNTIME)-vfs2-fuse-d,--net-raw --debug --strace --log-packets --vfs2 --fuse)
@$(call reload_docker)
.PHONY: dev
diff --git a/WORKSPACE b/WORKSPACE
index 4ee93a670..779ee6ae6 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -303,8 +303,8 @@ go_repository(
go_repository(
name = "com_github_gofrs_flock",
importpath = "github.com/gofrs/flock",
- sum = "h1:JFTFz3HZTGmgMz4E1TabNBNJljROSYgja1b4l50FNVs=",
- version = "v0.6.1-0.20180915234121-886344bea079",
+ sum = "h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=",
+ version = "v0.8.0",
)
go_repository(
diff --git a/go.mod b/go.mod
index 0774d2930..870fb0b83 100644
--- a/go.mod
+++ b/go.mod
@@ -20,7 +20,7 @@ require (
github.com/docker/docker v1.4.2-0.20191028175130-9e7d5ac5ea55 // indirect
github.com/docker/go-connections v0.3.0 // indirect
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect
- github.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079 // indirect
+ github.com/gofrs/flock v0.8.0 // indirect
github.com/gogo/googleapis v1.4.0 // indirect
github.com/gogo/protobuf v1.3.1 // indirect
github.com/golang/mock v1.4.4 // indirect
diff --git a/go.sum b/go.sum
index 9d7ef2243..bd3d0b0f7 100644
--- a/go.sum
+++ b/go.sum
@@ -135,6 +135,8 @@ github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079 h1:JFTFz3HZTGmgMz4E1TabNBNJljROSYgja1b4l50FNVs=
github.com/gofrs/flock v0.6.1-0.20180915234121-886344bea079/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
+github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
diff --git a/nogo.yaml b/nogo.yaml
index 4c4cdb8c1..c0445a837 100644
--- a/nogo.yaml
+++ b/nogo.yaml
@@ -46,8 +46,6 @@ global:
- "(field|method|struct|type) .* should be .*"
# Generated proto code sometimes duplicates imports with aliases.
- "duplicate import"
- # TODO(b/179817829): Upgrade to flock to v0.8.0.
- - "flock.NewFlock is deprecated: Use New instead"
internal:
suppress:
# We use ALL_CAPS for system definitions,
@@ -77,6 +75,51 @@ analyzers:
external: # Enabled.
cgocall:
external: # Enabled.
+ checklocks:
+ internal:
+ exclude:
+ - "^-$" # b/181776900: analyzer fails on buildkite
+ - pkg/sentry/fs/dirent.go # unsupported usage.
+ - pkg/sentry/fs/fsutil/inode_cached.go # unsupported usage.
+ - pkg/sentry/fs/gofer/inode_state.go # unsupported usage.
+ - pkg/sentry/fs/gofer/session.go # unsupported usage.
+ - pkg/sentry/fs/ramfs/dir.go # unsupported usage.
+ - pkg/sentry/fsimpl/fuse/connection.go # unsupported usage.
+ - pkg/sentry/fsimpl/kernfs/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/kernfs/inode_impl_util.go # unsupported usage.
+ - pkg/sentry/fsimpl/fuse/dev_test.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/gofer.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/regular_file.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/special_file.go # unsupported usage.
+ - pkg/sentry/fsimpl/gofer/symlink.go # unsupported usage.
+ - pkg/sentry/fsimpl/overlay/copy_up.go # unsupported usage.
+ - pkg/sentry/fsimpl/overlay/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/tmpfs/filesystem.go # unsupported usage.
+ - pkg/sentry/fsimpl/verity/filesystem.go # unsupported usage.
+ - pkg/sentry/kernel/futex/futex.go # unsupported usage.
+ - pkg/sentry/kernel/pipe/vfs.go # unsupported usage.
+ - pkg/sentry/mm/syscalls.go # unsupported usage.
+ - pkg/sentry/kernel/fd_table.go # unsupported usage.
+ - pkg/sentry/kernel/ptrace.go # unsupported usage.
+ - pkg/sentry/time/calibrated_clock_test.go # unsupported usage.
+ - pkg/sentry/kernel/task_context.go # unsupported usage.
+ - pkg/sentry/pgalloc/pgalloc.go # unsupported usage.
+ - pkg/sentry/socket/unix/transport/connectioned.go # unsupported usage.
+ - pkg/sentry/vfs/dentry.go # unsupported usage.
+ - pkg/tcpip/network/internal/ip/generic_multicast_protocol_test.go # unsupported usage.
+ - pkg/tcpip/stack/conntrack.go # unsupported usage.
+ - pkg/tcpip/transport/packet/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/raw/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/icmp/endpoint.go # unsupported usage.
+ - pkg/tcpip/transport/icmp/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/accept.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/connect.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/dispatcher.go # unsupported usage (TryLock)
+ - pkg/tcpip/transport/tcp/endpoint.go # unsupported usage.
+ - pkg/tcpip/transport/tcp/endpoint_state.go # unsupported usage.
+ - pkg/tcpip/transport/udp/endpoint.go # unsupported usage (defer unlock in anonymous function)
+ - pkg/tcpip/transport/udp/endpoint_state.go # unsupported usage (missing nested mutex annotation support)
shadow: # Disable for now.
generated:
exclude: [".*"]
diff --git a/pkg/abi/linux/ptrace_amd64.go b/pkg/abi/linux/ptrace_amd64.go
index ed3881e27..50e22fe7e 100644
--- a/pkg/abi/linux/ptrace_amd64.go
+++ b/pkg/abi/linux/ptrace_amd64.go
@@ -50,3 +50,14 @@ type PtraceRegs struct {
Fs uint64
Gs uint64
}
+
+// InstructionPointer returns the address of the next instruction to
+// be executed.
+func (p *PtraceRegs) InstructionPointer() uint64 {
+ return p.Rip
+}
+
+// StackPointer returns the address of the Stack pointer.
+func (p *PtraceRegs) StackPointer() uint64 {
+ return p.Rsp
+}
diff --git a/pkg/abi/linux/ptrace_arm64.go b/pkg/abi/linux/ptrace_arm64.go
index 6147738b3..da36811d2 100644
--- a/pkg/abi/linux/ptrace_arm64.go
+++ b/pkg/abi/linux/ptrace_arm64.go
@@ -27,3 +27,14 @@ type PtraceRegs struct {
Pc uint64
Pstate uint64
}
+
+// InstructionPointer returns the address of the next instruction to be
+// executed.
+func (p *PtraceRegs) InstructionPointer() uint64 {
+ return p.Pc
+}
+
+// StackPointer returns the address of the Stack pointer.
+func (p *PtraceRegs) StackPointer() uint64 {
+ return p.Sp
+}
diff --git a/pkg/log/BUILD b/pkg/log/BUILD
index 23ef7ea8d..3ed6aba5c 100644
--- a/pkg/log/BUILD
+++ b/pkg/log/BUILD
@@ -18,7 +18,6 @@ go_library(
deps = [
"//pkg/linewriter",
"//pkg/sync",
- "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/pkg/log/log.go b/pkg/log/log.go
index d39af3bf4..073cf6238 100644
--- a/pkg/log/log.go
+++ b/pkg/log/log.go
@@ -40,7 +40,6 @@ import (
"sync/atomic"
"time"
- "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/linewriter"
"gvisor.dev/gvisor/pkg/sync"
)
@@ -105,7 +104,7 @@ func (l *Writer) Write(data []byte) (int, error) {
n += w
// Is it a non-blocking socket?
- if pathErr, ok := err.(*os.PathError); ok && pathErr.Err == unix.EAGAIN {
+ if pathErr, ok := err.(*os.PathError); ok && pathErr.Timeout() {
runtime.Gosched()
continue
}
diff --git a/pkg/sentry/fs/copy_up.go b/pkg/sentry/fs/copy_up.go
index 8e0aa9019..58deb25fc 100644
--- a/pkg/sentry/fs/copy_up.go
+++ b/pkg/sentry/fs/copy_up.go
@@ -303,17 +303,18 @@ func copyUpLocked(ctx context.Context, parent *Dirent, next *Dirent) error {
// Take a reference on the upper Inode (transferred to
// next.Inode.overlay.upper) and make new translations use it.
- next.Inode.overlay.dataMu.Lock()
+ overlay := next.Inode.overlay
+ overlay.dataMu.Lock()
childUpperInode.IncRef()
- next.Inode.overlay.upper = childUpperInode
- next.Inode.overlay.dataMu.Unlock()
+ overlay.upper = childUpperInode
+ overlay.dataMu.Unlock()
// Invalidate existing translations through the lower Inode.
- next.Inode.overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
+ overlay.mappings.InvalidateAll(memmap.InvalidateOpts{})
// Remove existing memory mappings from the lower Inode.
if lowerMappable != nil {
- for seg := next.Inode.overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
+ for seg := overlay.mappings.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
for m := range seg.Value() {
lowerMappable.RemoveMapping(ctx, m.MappingSpace, m.AddrRange, seg.Start(), m.Writable)
}
diff --git a/pkg/sentry/fs/gofer/inode_state.go b/pkg/sentry/fs/gofer/inode_state.go
index 141e3c27f..e2af1d2ae 100644
--- a/pkg/sentry/fs/gofer/inode_state.go
+++ b/pkg/sentry/fs/gofer/inode_state.go
@@ -109,6 +109,7 @@ func (i *inodeFileState) loadLoading(_ struct{}) {
}
// afterLoad is invoked by stateify.
+// +checklocks:i.loading
func (i *inodeFileState) afterLoad() {
load := func() (err error) {
// See comment on i.loading().
diff --git a/pkg/sentry/fs/proc/sys_net.go b/pkg/sentry/fs/proc/sys_net.go
index 52061175f..bbe282c03 100644
--- a/pkg/sentry/fs/proc/sys_net.go
+++ b/pkg/sentry/fs/proc/sys_net.go
@@ -17,6 +17,7 @@ package proc
import (
"fmt"
"io"
+ "math"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
@@ -26,6 +27,7 @@ import (
"gvisor.dev/gvisor/pkg/sentry/fs/ramfs"
"gvisor.dev/gvisor/pkg/sentry/inet"
"gvisor.dev/gvisor/pkg/sync"
+ "gvisor.dev/gvisor/pkg/syserror"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/usermem"
"gvisor.dev/gvisor/pkg/waiter"
@@ -498,6 +500,120 @@ func (f *ipForwardingFile) Write(ctx context.Context, _ *fs.File, src usermem.IO
return n, f.stack.SetForwarding(ipv4.ProtocolNumber, *f.ipf.enabled)
}
+// portRangeInode implements fs.InodeOperations. It provides and allows
+// modification of the range of ephemeral ports that IPv4 and IPv6 sockets
+// choose from.
+//
+// +stateify savable
+type portRangeInode struct {
+ fsutil.SimpleFileInode
+
+ stack inet.Stack `state:"wait"`
+
+ // start and end store the port range. We must save/restore this here,
+ // since a netstack instance is created on restore.
+ start *uint16
+ end *uint16
+}
+
+func newPortRangeInode(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {
+ ipf := &portRangeInode{
+ SimpleFileInode: *fsutil.NewSimpleFileInode(ctx, fs.RootOwner, fs.FilePermsFromMode(0644), linux.PROC_SUPER_MAGIC),
+ stack: s,
+ }
+ sattr := fs.StableAttr{
+ DeviceID: device.ProcDevice.DeviceID(),
+ InodeID: device.ProcDevice.NextIno(),
+ BlockSize: usermem.PageSize,
+ Type: fs.SpecialFile,
+ }
+ return fs.NewInode(ctx, ipf, msrc, sattr)
+}
+
+// Truncate implements fs.InodeOperations.Truncate. Truncate is called when
+// O_TRUNC is specified for any kind of existing Dirent but is not called via
+// (f)truncate for proc files.
+func (*portRangeInode) Truncate(context.Context, *fs.Inode, int64) error {
+ return nil
+}
+
+// +stateify savable
+type portRangeFile struct {
+ fsutil.FileGenericSeek `state:"nosave"`
+ fsutil.FileNoIoctl `state:"nosave"`
+ fsutil.FileNoMMap `state:"nosave"`
+ fsutil.FileNoSplice `state:"nosave"`
+ fsutil.FileNoopFlush `state:"nosave"`
+ fsutil.FileNoopFsync `state:"nosave"`
+ fsutil.FileNoopRelease `state:"nosave"`
+ fsutil.FileNotDirReaddir `state:"nosave"`
+ fsutil.FileUseInodeUnstableAttr `state:"nosave"`
+ waiter.AlwaysReady `state:"nosave"`
+
+ inode *portRangeInode
+}
+
+// GetFile implements fs.InodeOperations.GetFile.
+func (in *portRangeInode) GetFile(ctx context.Context, dirent *fs.Dirent, flags fs.FileFlags) (*fs.File, error) {
+ flags.Pread = true
+ flags.Pwrite = true
+ return fs.NewFile(ctx, dirent, flags, &portRangeFile{
+ inode: in,
+ }), nil
+}
+
+// Read implements fs.FileOperations.Read.
+func (pf *portRangeFile) Read(ctx context.Context, _ *fs.File, dst usermem.IOSequence, offset int64) (int64, error) {
+ if offset != 0 {
+ return 0, io.EOF
+ }
+
+ if pf.inode.start == nil {
+ start, end := pf.inode.stack.PortRange()
+ pf.inode.start = &start
+ pf.inode.end = &end
+ }
+
+ contents := fmt.Sprintf("%d %d\n", *pf.inode.start, *pf.inode.end)
+ n, err := dst.CopyOut(ctx, []byte(contents))
+ return int64(n), err
+}
+
+// Write implements fs.FileOperations.Write.
+//
+// Offset is ignored, multiple writes are not supported.
+func (pf *portRangeFile) Write(ctx context.Context, _ *fs.File, src usermem.IOSequence, offset int64) (int64, error) {
+ if src.NumBytes() == 0 {
+ return 0, nil
+ }
+
+ // Only consider size of one memory page for input for performance
+ // reasons.
+ src = src.TakeFirst(usermem.PageSize - 1)
+
+ ports := make([]int32, 2)
+ n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
+ if err != nil {
+ return 0, err
+ }
+
+ // Port numbers must be uint16s.
+ if ports[0] < 0 || ports[1] < 0 || ports[0] > math.MaxUint16 || ports[1] > math.MaxUint16 {
+ return 0, syserror.EINVAL
+ }
+
+ if err := pf.inode.stack.SetPortRange(uint16(ports[0]), uint16(ports[1])); err != nil {
+ return 0, err
+ }
+ if pf.inode.start == nil {
+ pf.inode.start = new(uint16)
+ pf.inode.end = new(uint16)
+ }
+ *pf.inode.start = uint16(ports[0])
+ *pf.inode.end = uint16(ports[1])
+ return n, nil
+}
+
func (p *proc) newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s inet.Stack) *fs.Inode {
contents := map[string]*fs.Inode{
// Add tcp_sack.
@@ -506,12 +622,15 @@ func (p *proc) newSysNetIPv4Dir(ctx context.Context, msrc *fs.MountSource, s ine
// Add ip_forward.
"ip_forward": newIPForwardingInode(ctx, msrc, s),
+ // Allow for configurable ephemeral port ranges. Note that this
+ // controls ports for both IPv4 and IPv6 sockets.
+ "ip_local_port_range": newPortRangeInode(ctx, msrc, s),
+
// The following files are simple stubs until they are
// implemented in netstack, most of these files are
// configuration related. We use the value closest to the
// actual netstack behavior or any empty file, all of these
// files will have mode 0444 (read-only for all users).
- "ip_local_port_range": newStaticProcInode(ctx, msrc, []byte("16000 65535")),
"ip_local_reserved_ports": newStaticProcInode(ctx, msrc, []byte("")),
"ipfrag_time": newStaticProcInode(ctx, msrc, []byte("30")),
"ip_nonlocal_bind": newStaticProcInode(ctx, msrc, []byte("0")),
diff --git a/pkg/sentry/fsimpl/devpts/devpts.go b/pkg/sentry/fsimpl/devpts/devpts.go
index d8c237753..e75954105 100644
--- a/pkg/sentry/fsimpl/devpts/devpts.go
+++ b/pkg/sentry/fsimpl/devpts/devpts.go
@@ -137,6 +137,11 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.Filesystem.Release(ctx)
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
// rootInode is the root directory inode for the devpts mounts.
//
// +stateify savable
diff --git a/pkg/sentry/fsimpl/ext/filesystem.go b/pkg/sentry/fsimpl/ext/filesystem.go
index 917f1873d..d4fc484a2 100644
--- a/pkg/sentry/fsimpl/ext/filesystem.go
+++ b/pkg/sentry/fsimpl/ext/filesystem.go
@@ -548,3 +548,8 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
defer fs.mu.RUnlock()
return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
}
+
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
diff --git a/pkg/sentry/fsimpl/fuse/fusefs.go b/pkg/sentry/fsimpl/fuse/fusefs.go
index 204d8d143..fef857afb 100644
--- a/pkg/sentry/fsimpl/fuse/fusefs.go
+++ b/pkg/sentry/fsimpl/fuse/fusefs.go
@@ -47,19 +47,14 @@ type FilesystemType struct{}
// +stateify savable
type filesystemOptions struct {
- // userID specifies the numeric uid of the mount owner.
- // This option should not be specified by the filesystem owner.
- // It is set by libfuse (or, if libfuse is not used, must be set
- // by the filesystem itself). For more information, see man page
- // for fuse(8)
- userID uint32
-
- // groupID specifies the numeric gid of the mount owner.
- // This option should not be specified by the filesystem owner.
- // It is set by libfuse (or, if libfuse is not used, must be set
- // by the filesystem itself). For more information, see man page
- // for fuse(8)
- groupID uint32
+ // mopts contains the raw, unparsed mount options passed to this filesystem.
+ mopts string
+
+ // uid of the mount owner.
+ uid auth.KUID
+
+ // gid of the mount owner.
+ gid auth.KGID
// rootMode specifies the file mode of the filesystem's root.
rootMode linux.FileMode
@@ -73,6 +68,19 @@ type filesystemOptions struct {
// specified as "max_read" in fs parameters.
// If not specified by user, use math.MaxUint32 as default value.
maxRead uint32
+
+ // defaultPermissions is the default_permissions mount option. It instructs
+ // the kernel to perform a standard unix permission checks based on
+ // ownership and mode bits, instead of deferring the check to the server.
+ //
+ // Immutable after mount.
+ defaultPermissions bool
+
+ // allowOther is the allow_other mount option. It allows processes that
+ // don't own the FUSE mount to call into it.
+ //
+ // Immutable after mount.
+ allowOther bool
}
// filesystem implements vfs.FilesystemImpl.
@@ -108,18 +116,18 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
return nil, nil, err
}
- var fsopts filesystemOptions
+ fsopts := filesystemOptions{mopts: opts.Data}
mopts := vfs.GenericParseMountOptions(opts.Data)
deviceDescriptorStr, ok := mopts["fd"]
if !ok {
- log.Warningf("%s.GetFilesystem: communication file descriptor N (obtained by opening /dev/fuse) must be specified as 'fd=N'", fsType.Name())
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option fd missing")
return nil, nil, syserror.EINVAL
}
delete(mopts, "fd")
deviceDescriptor, err := strconv.ParseInt(deviceDescriptorStr, 10 /* base */, 32 /* bitSize */)
if err != nil {
- log.Debugf("%s.GetFilesystem: device FD '%v' not parsable: %v", fsType.Name(), deviceDescriptorStr, err)
+ ctx.Debugf("fusefs.FilesystemType.GetFilesystem: invalid fd: %q (%v)", deviceDescriptorStr, err)
return nil, nil, syserror.EINVAL
}
@@ -141,38 +149,54 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Parse and set all the other supported FUSE mount options.
// TODO(gVisor.dev/issue/3229): Expand the supported mount options.
- if userIDStr, ok := mopts["user_id"]; ok {
+ if uidStr, ok := mopts["user_id"]; ok {
delete(mopts, "user_id")
- userID, err := strconv.ParseUint(userIDStr, 10, 32)
+ uid, err := strconv.ParseUint(uidStr, 10, 32)
if err != nil {
- log.Warningf("%s.GetFilesystem: invalid user_id: user_id=%s", fsType.Name(), userIDStr)
+ log.Warningf("%s.GetFilesystem: invalid user_id: user_id=%s", fsType.Name(), uidStr)
return nil, nil, syserror.EINVAL
}
- fsopts.userID = uint32(userID)
+ kuid := creds.UserNamespace.MapToKUID(auth.UID(uid))
+ if !kuid.Ok() {
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: unmapped uid: %d", uid)
+ return nil, nil, syserror.EINVAL
+ }
+ fsopts.uid = kuid
+ } else {
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option user_id missing")
+ return nil, nil, syserror.EINVAL
}
- if groupIDStr, ok := mopts["group_id"]; ok {
+ if gidStr, ok := mopts["group_id"]; ok {
delete(mopts, "group_id")
- groupID, err := strconv.ParseUint(groupIDStr, 10, 32)
+ gid, err := strconv.ParseUint(gidStr, 10, 32)
if err != nil {
- log.Warningf("%s.GetFilesystem: invalid group_id: group_id=%s", fsType.Name(), groupIDStr)
+ log.Warningf("%s.GetFilesystem: invalid group_id: group_id=%s", fsType.Name(), gidStr)
+ return nil, nil, syserror.EINVAL
+ }
+ kgid := creds.UserNamespace.MapToKGID(auth.GID(gid))
+ if !kgid.Ok() {
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: unmapped gid: %d", gid)
return nil, nil, syserror.EINVAL
}
- fsopts.groupID = uint32(groupID)
+ fsopts.gid = kgid
+ } else {
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option group_id missing")
+ return nil, nil, syserror.EINVAL
}
- rootMode := linux.FileMode(0777)
- modeStr, ok := mopts["rootmode"]
- if ok {
+ if modeStr, ok := mopts["rootmode"]; ok {
delete(mopts, "rootmode")
mode, err := strconv.ParseUint(modeStr, 8, 32)
if err != nil {
log.Warningf("%s.GetFilesystem: invalid mode: %q", fsType.Name(), modeStr)
return nil, nil, syserror.EINVAL
}
- rootMode = linux.FileMode(mode)
+ fsopts.rootMode = linux.FileMode(mode)
+ } else {
+ ctx.Warningf("fusefs.FilesystemType.GetFilesystem: mandatory mount option rootmode missing")
+ return nil, nil, syserror.EINVAL
}
- fsopts.rootMode = rootMode
// Set the maxInFlightRequests option.
fsopts.maxActiveRequests = maxActiveRequestsDefault
@@ -192,6 +216,16 @@ func (fsType FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
fsopts.maxRead = math.MaxUint32
}
+ if _, ok := mopts["default_permissions"]; ok {
+ delete(mopts, "default_permissions")
+ fsopts.defaultPermissions = true
+ }
+
+ if _, ok := mopts["allow_other"]; ok {
+ delete(mopts, "allow_other")
+ fsopts.allowOther = true
+ }
+
// Check for unparsed options.
if len(mopts) != 0 {
log.Warningf("%s.GetFilesystem: unsupported or unknown options: %v", fsType.Name(), mopts)
@@ -260,6 +294,11 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.Filesystem.Release(ctx)
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return fs.opts.mopts
+}
+
// inode implements kernfs.Inode.
//
// +stateify savable
@@ -318,6 +357,37 @@ func (fs *filesystem) newInode(ctx context.Context, nodeID uint64, attr linux.FU
return i
}
+// CheckPermissions implements kernfs.Inode.CheckPermissions.
+func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, ats vfs.AccessTypes) error {
+ // Since FUSE operations are ultimately backed by a userspace process (the
+ // fuse daemon), allowing a process to call into fusefs grants the daemon
+ // ptrace-like capabilities over the calling process. Because of this, by
+ // default FUSE only allows the mount owner to interact with the
+ // filesystem. This explicitly excludes setuid/setgid processes.
+ //
+ // This behaviour can be overridden with the 'allow_other' mount option.
+ //
+ // See fs/fuse/dir.c:fuse_allow_current_process() in Linux.
+ if !i.fs.opts.allowOther {
+ if creds.RealKUID != i.fs.opts.uid ||
+ creds.EffectiveKUID != i.fs.opts.uid ||
+ creds.SavedKUID != i.fs.opts.uid ||
+ creds.RealKGID != i.fs.opts.gid ||
+ creds.EffectiveKGID != i.fs.opts.gid ||
+ creds.SavedKGID != i.fs.opts.gid {
+ return syserror.EACCES
+ }
+ }
+
+ // By default, fusefs delegates all permission checks to the server.
+ // However, standard unix permission checks can be enabled with the
+ // default_permissions mount option.
+ if i.fs.opts.defaultPermissions {
+ return i.InodeAttrs.CheckPermissions(ctx, creds, ats)
+ }
+ return nil
+}
+
// Open implements kernfs.Inode.Open.
func (i *inode) Open(ctx context.Context, rp *vfs.ResolvingPath, d *kernfs.Dentry, opts vfs.OpenOptions) (*vfs.FileDescription, error) {
isDir := i.InodeAttrs.Mode().IsDir()
diff --git a/pkg/sentry/fsimpl/gofer/filesystem.go b/pkg/sentry/fsimpl/gofer/filesystem.go
index 8f95473b6..c34451269 100644
--- a/pkg/sentry/fsimpl/gofer/filesystem.go
+++ b/pkg/sentry/fsimpl/gofer/filesystem.go
@@ -15,7 +15,9 @@
package gofer
import (
+ "fmt"
"math"
+ "strings"
"sync"
"sync/atomic"
@@ -1608,3 +1610,58 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
defer fs.renameMu.RUnlock()
return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
}
+
+type mopt struct {
+ key string
+ value interface{}
+}
+
+func (m mopt) String() string {
+ if m.value == nil {
+ return fmt.Sprintf("%s", m.key)
+ }
+ return fmt.Sprintf("%s=%v", m.key, m.value)
+}
+
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ optsKV := []mopt{
+ {moptTransport, transportModeFD}, // Only valid value, currently.
+ {moptReadFD, fs.opts.fd}, // Currently, read and write FD are the same.
+ {moptWriteFD, fs.opts.fd}, // Currently, read and write FD are the same.
+ {moptAname, fs.opts.aname},
+ {moptDfltUID, fs.opts.dfltuid},
+ {moptDfltGID, fs.opts.dfltgid},
+ {moptMsize, fs.opts.msize},
+ {moptVersion, fs.opts.version},
+ {moptDentryCacheLimit, fs.opts.maxCachedDentries},
+ }
+
+ switch fs.opts.interop {
+ case InteropModeExclusive:
+ optsKV = append(optsKV, mopt{moptCache, cacheFSCache})
+ case InteropModeWritethrough:
+ optsKV = append(optsKV, mopt{moptCache, cacheFSCacheWritethrough})
+ case InteropModeShared:
+ if fs.opts.regularFilesUseSpecialFileFD {
+ optsKV = append(optsKV, mopt{moptCache, cacheNone})
+ } else {
+ optsKV = append(optsKV, mopt{moptCache, cacheRemoteRevalidating})
+ }
+ }
+ if fs.opts.forcePageCache {
+ optsKV = append(optsKV, mopt{moptForcePageCache, nil})
+ }
+ if fs.opts.limitHostFDTranslation {
+ optsKV = append(optsKV, mopt{moptLimitHostFDTranslation, nil})
+ }
+ if fs.opts.overlayfsStaleRead {
+ optsKV = append(optsKV, mopt{moptOverlayfsStaleRead, nil})
+ }
+
+ opts := make([]string, 0, len(optsKV))
+ for _, opt := range optsKV {
+ opts = append(opts, opt.String())
+ }
+ return strings.Join(opts, ",")
+}
diff --git a/pkg/sentry/fsimpl/gofer/gofer.go b/pkg/sentry/fsimpl/gofer/gofer.go
index 1508cbdf1..71569dc65 100644
--- a/pkg/sentry/fsimpl/gofer/gofer.go
+++ b/pkg/sentry/fsimpl/gofer/gofer.go
@@ -66,6 +66,34 @@ import (
// Name is the default filesystem name.
const Name = "9p"
+// Mount option names for goferfs.
+const (
+ moptTransport = "trans"
+ moptReadFD = "rfdno"
+ moptWriteFD = "wfdno"
+ moptAname = "aname"
+ moptDfltUID = "dfltuid"
+ moptDfltGID = "dfltgid"
+ moptMsize = "msize"
+ moptVersion = "version"
+ moptDentryCacheLimit = "dentry_cache_limit"
+ moptCache = "cache"
+ moptForcePageCache = "force_page_cache"
+ moptLimitHostFDTranslation = "limit_host_fd_translation"
+ moptOverlayfsStaleRead = "overlayfs_stale_read"
+)
+
+// Valid values for the "cache" mount option.
+const (
+ cacheNone = "none"
+ cacheFSCache = "fscache"
+ cacheFSCacheWritethrough = "fscache_writethrough"
+ cacheRemoteRevalidating = "remote_revalidating"
+)
+
+// Valid values for "trans" mount option.
+const transportModeFD = "fd"
+
// FilesystemType implements vfs.FilesystemType.
//
// +stateify savable
@@ -301,39 +329,39 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Get the attach name.
fsopts.aname = "/"
- if aname, ok := mopts["aname"]; ok {
- delete(mopts, "aname")
+ if aname, ok := mopts[moptAname]; ok {
+ delete(mopts, moptAname)
fsopts.aname = aname
}
// Parse the cache policy. For historical reasons, this defaults to the
// least generally-applicable option, InteropModeExclusive.
fsopts.interop = InteropModeExclusive
- if cache, ok := mopts["cache"]; ok {
- delete(mopts, "cache")
+ if cache, ok := mopts[moptCache]; ok {
+ delete(mopts, moptCache)
switch cache {
- case "fscache":
+ case cacheFSCache:
fsopts.interop = InteropModeExclusive
- case "fscache_writethrough":
+ case cacheFSCacheWritethrough:
fsopts.interop = InteropModeWritethrough
- case "none":
+ case cacheNone:
fsopts.regularFilesUseSpecialFileFD = true
fallthrough
- case "remote_revalidating":
+ case cacheRemoteRevalidating:
fsopts.interop = InteropModeShared
default:
- ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid cache policy: cache=%s", cache)
+ ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid cache policy: %s=%s", moptCache, cache)
return nil, nil, syserror.EINVAL
}
}
// Parse the default UID and GID.
fsopts.dfltuid = _V9FS_DEFUID
- if dfltuidstr, ok := mopts["dfltuid"]; ok {
- delete(mopts, "dfltuid")
+ if dfltuidstr, ok := mopts[moptDfltUID]; ok {
+ delete(mopts, moptDfltUID)
dfltuid, err := strconv.ParseUint(dfltuidstr, 10, 32)
if err != nil {
- ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: dfltuid=%s", dfltuidstr)
+ ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: %s=%s", moptDfltUID, dfltuidstr)
return nil, nil, syserror.EINVAL
}
// In Linux, dfltuid is interpreted as a UID and is converted to a KUID
@@ -342,11 +370,11 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
fsopts.dfltuid = auth.KUID(dfltuid)
}
fsopts.dfltgid = _V9FS_DEFGID
- if dfltgidstr, ok := mopts["dfltgid"]; ok {
- delete(mopts, "dfltgid")
+ if dfltgidstr, ok := mopts[moptDfltGID]; ok {
+ delete(mopts, moptDfltGID)
dfltgid, err := strconv.ParseUint(dfltgidstr, 10, 32)
if err != nil {
- ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: dfltgid=%s", dfltgidstr)
+ ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid default UID: %s=%s", moptDfltGID, dfltgidstr)
return nil, nil, syserror.EINVAL
}
fsopts.dfltgid = auth.KGID(dfltgid)
@@ -354,11 +382,11 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Parse the 9P message size.
fsopts.msize = 1024 * 1024 // 1M, tested to give good enough performance up to 64M
- if msizestr, ok := mopts["msize"]; ok {
- delete(mopts, "msize")
+ if msizestr, ok := mopts[moptMsize]; ok {
+ delete(mopts, moptMsize)
msize, err := strconv.ParseUint(msizestr, 10, 32)
if err != nil {
- ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid message size: msize=%s", msizestr)
+ ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid message size: %s=%s", moptMsize, msizestr)
return nil, nil, syserror.EINVAL
}
fsopts.msize = uint32(msize)
@@ -366,34 +394,34 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
// Parse the 9P protocol version.
fsopts.version = p9.HighestVersionString()
- if version, ok := mopts["version"]; ok {
- delete(mopts, "version")
+ if version, ok := mopts[moptVersion]; ok {
+ delete(mopts, moptVersion)
fsopts.version = version
}
// Parse the dentry cache limit.
fsopts.maxCachedDentries = 1000
- if str, ok := mopts["dentry_cache_limit"]; ok {
- delete(mopts, "dentry_cache_limit")
+ if str, ok := mopts[moptDentryCacheLimit]; ok {
+ delete(mopts, moptDentryCacheLimit)
maxCachedDentries, err := strconv.ParseUint(str, 10, 64)
if err != nil {
- ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid dentry cache limit: dentry_cache_limit=%s", str)
+ ctx.Warningf("gofer.FilesystemType.GetFilesystem: invalid dentry cache limit: %s=%s", moptDentryCacheLimit, str)
return nil, nil, syserror.EINVAL
}
fsopts.maxCachedDentries = maxCachedDentries
}
// Handle simple flags.
- if _, ok := mopts["force_page_cache"]; ok {
- delete(mopts, "force_page_cache")
+ if _, ok := mopts[moptForcePageCache]; ok {
+ delete(mopts, moptForcePageCache)
fsopts.forcePageCache = true
}
- if _, ok := mopts["limit_host_fd_translation"]; ok {
- delete(mopts, "limit_host_fd_translation")
+ if _, ok := mopts[moptLimitHostFDTranslation]; ok {
+ delete(mopts, moptLimitHostFDTranslation)
fsopts.limitHostFDTranslation = true
}
- if _, ok := mopts["overlayfs_stale_read"]; ok {
- delete(mopts, "overlayfs_stale_read")
+ if _, ok := mopts[moptOverlayfsStaleRead]; ok {
+ delete(mopts, moptOverlayfsStaleRead)
fsopts.overlayfsStaleRead = true
}
// fsopts.regularFilesUseSpecialFileFD can only be enabled by specifying
@@ -469,34 +497,34 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
func getFDFromMountOptionsMap(ctx context.Context, mopts map[string]string) (int, error) {
// Check that the transport is "fd".
- trans, ok := mopts["trans"]
- if !ok || trans != "fd" {
- ctx.Warningf("gofer.getFDFromMountOptionsMap: transport must be specified as 'trans=fd'")
+ trans, ok := mopts[moptTransport]
+ if !ok || trans != transportModeFD {
+ ctx.Warningf("gofer.getFDFromMountOptionsMap: transport must be specified as '%s=%s'", moptTransport, transportModeFD)
return -1, syserror.EINVAL
}
- delete(mopts, "trans")
+ delete(mopts, moptTransport)
// Check that read and write FDs are provided and identical.
- rfdstr, ok := mopts["rfdno"]
+ rfdstr, ok := mopts[moptReadFD]
if !ok {
- ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD must be specified as 'rfdno=<file descriptor>'")
+ ctx.Warningf("gofer.getFDFromMountOptionsMap: read FD must be specified as '%s=<file descriptor>'", moptReadFD)
return -1, syserror.EINVAL
}
- delete(mopts, "rfdno")
+ delete(mopts, moptReadFD)
rfd, err := strconv.Atoi(rfdstr)
if err != nil {
- ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid read FD: rfdno=%s", rfdstr)
+ ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid read FD: %s=%s", moptReadFD, rfdstr)
return -1, syserror.EINVAL
}
- wfdstr, ok := mopts["wfdno"]
+ wfdstr, ok := mopts[moptWriteFD]
if !ok {
- ctx.Warningf("gofer.getFDFromMountOptionsMap: write FD must be specified as 'wfdno=<file descriptor>'")
+ ctx.Warningf("gofer.getFDFromMountOptionsMap: write FD must be specified as '%s=<file descriptor>'", moptWriteFD)
return -1, syserror.EINVAL
}
- delete(mopts, "wfdno")
+ delete(mopts, moptWriteFD)
wfd, err := strconv.Atoi(wfdstr)
if err != nil {
- ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid write FD: wfdno=%s", wfdstr)
+ ctx.Warningf("gofer.getFDFromMountOptionsMap: invalid write FD: %s=%s", moptWriteFD, wfdstr)
return -1, syserror.EINVAL
}
if rfd != wfd {
diff --git a/pkg/sentry/fsimpl/host/host.go b/pkg/sentry/fsimpl/host/host.go
index ad5de80dc..b9cce4181 100644
--- a/pkg/sentry/fsimpl/host/host.go
+++ b/pkg/sentry/fsimpl/host/host.go
@@ -260,6 +260,11 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
return vfs.PrependPathSyntheticError{}
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
// CheckPermissions implements kernfs.Inode.CheckPermissions.
func (i *inode) CheckPermissions(ctx context.Context, creds *auth.Credentials, ats vfs.AccessTypes) error {
var s unix.Stat_t
diff --git a/pkg/sentry/fsimpl/kernfs/kernfs_test.go b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
index e63588e33..1cd3137e6 100644
--- a/pkg/sentry/fsimpl/kernfs/kernfs_test.go
+++ b/pkg/sentry/fsimpl/kernfs/kernfs_test.go
@@ -67,6 +67,11 @@ type filesystem struct {
kernfs.Filesystem
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
type file struct {
kernfs.DynamicBytesFile
content string
diff --git a/pkg/sentry/fsimpl/overlay/filesystem.go b/pkg/sentry/fsimpl/overlay/filesystem.go
index f7f795b10..84e37f793 100644
--- a/pkg/sentry/fsimpl/overlay/filesystem.go
+++ b/pkg/sentry/fsimpl/overlay/filesystem.go
@@ -85,6 +85,8 @@ func putDentrySlice(ds *[]*dentry) {
// but dentry slices are allocated lazily, and it's much easier to say "defer
// fs.renameMuRUnlockAndCheckDrop(&ds)" than "defer func() {
// fs.renameMuRUnlockAndCheckDrop(ds) }()" to work around this.
+//
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*dentry) {
fs.renameMu.RUnlock()
if *dsp == nil {
@@ -110,6 +112,7 @@ func (fs *filesystem) renameMuRUnlockAndCheckDrop(ctx context.Context, dsp **[]*
putDentrySlice(*dsp)
}
+// +checklocks:fs.renameMu
func (fs *filesystem) renameMuUnlockAndCheckDrop(ctx context.Context, ds **[]*dentry) {
if *ds == nil {
fs.renameMu.Unlock()
@@ -1761,3 +1764,15 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
defer fs.renameMu.RUnlock()
return genericPrependPath(vfsroot, vd.Mount(), vd.Dentry().Impl().(*dentry), b)
}
+
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ // Return the mount options from the topmost layer.
+ var vd vfs.VirtualDentry
+ if fs.opts.UpperRoot.Ok() {
+ vd = fs.opts.UpperRoot
+ } else {
+ vd = fs.opts.LowerRoots[0]
+ }
+ return vd.Mount().Filesystem().Impl().MountOptions()
+}
diff --git a/pkg/sentry/fsimpl/pipefs/pipefs.go b/pkg/sentry/fsimpl/pipefs/pipefs.go
index 429733c10..3f05e444e 100644
--- a/pkg/sentry/fsimpl/pipefs/pipefs.go
+++ b/pkg/sentry/fsimpl/pipefs/pipefs.go
@@ -80,6 +80,11 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
return vfs.PrependPathSyntheticError{}
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
// inode implements kernfs.Inode.
//
// +stateify savable
diff --git a/pkg/sentry/fsimpl/proc/filesystem.go b/pkg/sentry/fsimpl/proc/filesystem.go
index 8716d0a3c..254a8b062 100644
--- a/pkg/sentry/fsimpl/proc/filesystem.go
+++ b/pkg/sentry/fsimpl/proc/filesystem.go
@@ -104,6 +104,11 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.Filesystem.Release(ctx)
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return fmt.Sprintf("dentry_cache_limit=%d", fs.MaxCachedDentries)
+}
+
// dynamicInode is an overfitted interface for common Inodes with
// dynamicByteSource types used in procfs.
//
diff --git a/pkg/sentry/fsimpl/proc/tasks_sys.go b/pkg/sentry/fsimpl/proc/tasks_sys.go
index fd7823daa..fb274b78e 100644
--- a/pkg/sentry/fsimpl/proc/tasks_sys.go
+++ b/pkg/sentry/fsimpl/proc/tasks_sys.go
@@ -17,6 +17,7 @@ package proc
import (
"bytes"
"fmt"
+ "math"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
@@ -69,17 +70,17 @@ func (fs *filesystem) newSysNetDir(ctx context.Context, root *auth.Credentials,
if stack := k.RootNetworkNamespace().Stack(); stack != nil {
contents = map[string]kernfs.Inode{
"ipv4": fs.newStaticDir(ctx, root, map[string]kernfs.Inode{
- "tcp_recovery": fs.newInode(ctx, root, 0644, &tcpRecoveryData{stack: stack}),
- "tcp_rmem": fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpRMem}),
- "tcp_sack": fs.newInode(ctx, root, 0644, &tcpSackData{stack: stack}),
- "tcp_wmem": fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpWMem}),
- "ip_forward": fs.newInode(ctx, root, 0444, &ipForwarding{stack: stack}),
+ "ip_forward": fs.newInode(ctx, root, 0444, &ipForwarding{stack: stack}),
+ "ip_local_port_range": fs.newInode(ctx, root, 0644, &portRange{stack: stack}),
+ "tcp_recovery": fs.newInode(ctx, root, 0644, &tcpRecoveryData{stack: stack}),
+ "tcp_rmem": fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpRMem}),
+ "tcp_sack": fs.newInode(ctx, root, 0644, &tcpSackData{stack: stack}),
+ "tcp_wmem": fs.newInode(ctx, root, 0644, &tcpMemData{stack: stack, dir: tcpWMem}),
// The following files are simple stubs until they are implemented in
// netstack, most of these files are configuration related. We use the
// value closest to the actual netstack behavior or any empty file, all
// of these files will have mode 0444 (read-only for all users).
- "ip_local_port_range": fs.newInode(ctx, root, 0444, newStaticFile("16000 65535")),
"ip_local_reserved_ports": fs.newInode(ctx, root, 0444, newStaticFile("")),
"ipfrag_time": fs.newInode(ctx, root, 0444, newStaticFile("30")),
"ip_nonlocal_bind": fs.newInode(ctx, root, 0444, newStaticFile("0")),
@@ -421,3 +422,68 @@ func (ipf *ipForwarding) Write(ctx context.Context, src usermem.IOSequence, offs
}
return n, nil
}
+
+// portRange implements vfs.WritableDynamicBytesSource for
+// /proc/sys/net/ipv4/ip_local_port_range.
+//
+// +stateify savable
+type portRange struct {
+ kernfs.DynamicBytesFile
+
+ stack inet.Stack `state:"wait"`
+
+ // start and end store the port range. We must save/restore this here,
+ // since a netstack instance is created on restore.
+ start *uint16
+ end *uint16
+}
+
+var _ vfs.WritableDynamicBytesSource = (*portRange)(nil)
+
+// Generate implements vfs.DynamicBytesSource.Generate.
+func (pr *portRange) Generate(ctx context.Context, buf *bytes.Buffer) error {
+ if pr.start == nil {
+ start, end := pr.stack.PortRange()
+ pr.start = &start
+ pr.end = &end
+ }
+ _, err := fmt.Fprintf(buf, "%d %d\n", *pr.start, *pr.end)
+ return err
+}
+
+// Write implements vfs.WritableDynamicBytesSource.Write.
+func (pr *portRange) Write(ctx context.Context, src usermem.IOSequence, offset int64) (int64, error) {
+ if offset != 0 {
+ // No need to handle partial writes thus far.
+ return 0, syserror.EINVAL
+ }
+ if src.NumBytes() == 0 {
+ return 0, nil
+ }
+
+ // Limit input size so as not to impact performance if input size is
+ // large.
+ src = src.TakeFirst(usermem.PageSize - 1)
+
+ ports := make([]int32, 2)
+ n, err := usermem.CopyInt32StringsInVec(ctx, src.IO, src.Addrs, ports, src.Opts)
+ if err != nil {
+ return 0, err
+ }
+
+ // Port numbers must be uint16s.
+ if ports[0] < 0 || ports[1] < 0 || ports[0] > math.MaxUint16 || ports[1] > math.MaxUint16 {
+ return 0, syserror.EINVAL
+ }
+
+ if err := pr.stack.SetPortRange(uint16(ports[0]), uint16(ports[1])); err != nil {
+ return 0, err
+ }
+ if pr.start == nil {
+ pr.start = new(uint16)
+ pr.end = new(uint16)
+ }
+ *pr.start = uint16(ports[0])
+ *pr.end = uint16(ports[1])
+ return n, nil
+}
diff --git a/pkg/sentry/fsimpl/sockfs/sockfs.go b/pkg/sentry/fsimpl/sockfs/sockfs.go
index fda1fa942..735756280 100644
--- a/pkg/sentry/fsimpl/sockfs/sockfs.go
+++ b/pkg/sentry/fsimpl/sockfs/sockfs.go
@@ -85,6 +85,11 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
return vfs.PrependPathSyntheticError{}
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
// inode implements kernfs.Inode.
//
// +stateify savable
diff --git a/pkg/sentry/fsimpl/sys/sys.go b/pkg/sentry/fsimpl/sys/sys.go
index dbd9ebdda..1d9280dae 100644
--- a/pkg/sentry/fsimpl/sys/sys.go
+++ b/pkg/sentry/fsimpl/sys/sys.go
@@ -143,6 +143,11 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.Filesystem.Release(ctx)
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return fmt.Sprintf("dentry_cache_limit=%d", fs.MaxCachedDentries)
+}
+
// dir implements kernfs.Inode.
//
// +stateify savable
diff --git a/pkg/sentry/fsimpl/tmpfs/filesystem.go b/pkg/sentry/fsimpl/tmpfs/filesystem.go
index 4f675c21e..5fdca1d46 100644
--- a/pkg/sentry/fsimpl/tmpfs/filesystem.go
+++ b/pkg/sentry/fsimpl/tmpfs/filesystem.go
@@ -898,3 +898,8 @@ func (fs *filesystem) PrependPath(ctx context.Context, vfsroot, vd vfs.VirtualDe
d = d.parent
}
}
+
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return fs.mopts
+}
diff --git a/pkg/sentry/fsimpl/tmpfs/tmpfs.go b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
index a01e413e0..8df81f589 100644
--- a/pkg/sentry/fsimpl/tmpfs/tmpfs.go
+++ b/pkg/sentry/fsimpl/tmpfs/tmpfs.go
@@ -70,6 +70,10 @@ type filesystem struct {
// devMinor is the filesystem's minor device number. devMinor is immutable.
devMinor uint32
+ // mopts contains the tmpfs-specific mount options passed to this
+ // filesystem. Immutable.
+ mopts string
+
// mu serializes changes to the Dentry tree.
mu sync.RWMutex `state:"nosave"`
@@ -184,6 +188,7 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
mfp: mfp,
clock: clock,
devMinor: devMinor,
+ mopts: opts.Data,
}
fs.vfsfs.Init(vfsObj, newFSType, &fs)
diff --git a/pkg/sentry/fsimpl/verity/filesystem.go b/pkg/sentry/fsimpl/verity/filesystem.go
index 9057d2b4e..6cb1a23e0 100644
--- a/pkg/sentry/fsimpl/verity/filesystem.go
+++ b/pkg/sentry/fsimpl/verity/filesystem.go
@@ -590,6 +590,23 @@ func (fs *filesystem) lookupAndVerifyLocked(ctx context.Context, parent *dentry,
return nil, err
}
+ // Clear the Merkle tree file if it is to be generated at runtime.
+ // TODO(b/182315468): Optimize the Merkle tree generation process to
+ // allow only updating certain files/directories.
+ if fs.allowRuntimeEnable {
+ childMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: childMerkleVD,
+ Start: childMerkleVD,
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDWR | linux.O_TRUNC,
+ Mode: 0644,
+ })
+ if err != nil {
+ return nil, err
+ }
+ childMerkleFD.DecRef(ctx)
+ }
+
// The dentry needs to be cleaned up if any error occurs. IncRef will be
// called if a verity child dentry is successfully created.
defer childMerkleVD.DecRef(ctx)
diff --git a/pkg/sentry/fsimpl/verity/verity.go b/pkg/sentry/fsimpl/verity/verity.go
index 374f71568..0d9b0ee2c 100644
--- a/pkg/sentry/fsimpl/verity/verity.go
+++ b/pkg/sentry/fsimpl/verity/verity.go
@@ -38,6 +38,7 @@ import (
"fmt"
"math"
"strconv"
+ "strings"
"sync/atomic"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -310,6 +311,24 @@ func (fstype FilesystemType) GetFilesystem(ctx context.Context, vfsObj *vfs.Virt
d.DecRef(ctx)
return nil, nil, alertIntegrityViolation("Failed to find root Merkle file")
}
+
+ // Clear the Merkle tree file if it is to be generated at runtime.
+ // TODO(b/182315468): Optimize the Merkle tree generation process to
+ // allow only updating certain files/directories.
+ if fs.allowRuntimeEnable {
+ lowerMerkleFD, err := vfsObj.OpenAt(ctx, fs.creds, &vfs.PathOperation{
+ Root: lowerMerkleVD,
+ Start: lowerMerkleVD,
+ }, &vfs.OpenOptions{
+ Flags: linux.O_RDWR | linux.O_TRUNC,
+ Mode: 0644,
+ })
+ if err != nil {
+ return nil, nil, err
+ }
+ lowerMerkleFD.DecRef(ctx)
+ }
+
d.lowerMerkleVD = lowerMerkleVD
// Get metadata from the underlying file system.
@@ -418,6 +437,11 @@ func (fs *filesystem) Release(ctx context.Context) {
fs.lowerMount.DecRef(ctx)
}
+// MountOptions implements vfs.FilesystemImpl.MountOptions.
+func (fs *filesystem) MountOptions() string {
+ return ""
+}
+
// dentry implements vfs.DentryImpl.
//
// +stateify savable
@@ -750,6 +774,50 @@ func (fd *fileDescription) SetStat(ctx context.Context, opts vfs.SetStatOptions)
return syserror.EPERM
}
+// IterDirents implements vfs.FileDescriptionImpl.IterDirents.
+func (fd *fileDescription) IterDirents(ctx context.Context, cb vfs.IterDirentsCallback) error {
+ if !fd.d.isDir() {
+ return syserror.ENOTDIR
+ }
+ fd.mu.Lock()
+ defer fd.mu.Unlock()
+
+ var ds []vfs.Dirent
+ err := fd.lowerFD.IterDirents(ctx, vfs.IterDirentsCallbackFunc(func(dirent vfs.Dirent) error {
+ // Do not include the Merkle tree files.
+ if strings.Contains(dirent.Name, merklePrefix) || strings.Contains(dirent.Name, merkleRootPrefix) {
+ return nil
+ }
+ if fd.d.verityEnabled() {
+ // Verify that the child is expected.
+ if dirent.Name != "." && dirent.Name != ".." {
+ if _, ok := fd.d.childrenNames[dirent.Name]; !ok {
+ return alertIntegrityViolation(fmt.Sprintf("Unexpected child %s", dirent.Name))
+ }
+ }
+ }
+ ds = append(ds, dirent)
+ return nil
+ }))
+
+ if err != nil {
+ return err
+ }
+
+ // The result should contain all children plus "." and "..".
+ if fd.d.verityEnabled() && len(ds) != len(fd.d.childrenNames)+2 {
+ return alertIntegrityViolation(fmt.Sprintf("Unexpected number of children %d", len(ds)))
+ }
+
+ for fd.off < int64(len(ds)) {
+ if err := cb.Handle(ds[fd.off]); err != nil {
+ return err
+ }
+ fd.off++
+ }
+ return nil
+}
+
// Seek implements vfs.FileDescriptionImpl.Seek.
func (fd *fileDescription) Seek(ctx context.Context, offset int64, whence int32) (int64, error) {
fd.mu.Lock()
diff --git a/pkg/sentry/inet/inet.go b/pkg/sentry/inet/inet.go
index f31277d30..6b71bd3a9 100644
--- a/pkg/sentry/inet/inet.go
+++ b/pkg/sentry/inet/inet.go
@@ -93,6 +93,14 @@ type Stack interface {
// SetForwarding enables or disables packet forwarding between NICs.
SetForwarding(protocol tcpip.NetworkProtocolNumber, enable bool) error
+
+ // PortRange returns the inclusive range of ephemeral ports used by both
+ // UDP and TCP, for IPv4 and IPv6 alike.
+ PortRange() (uint16, uint16)
+
+ // SetPortRange sets the inclusive ephemeral port range used by UDP and
+ // TCP, for both IPv4 and IPv6.
+ SetPortRange(start uint16, end uint16) error
}
// Interface contains information about a network interface.
diff --git a/pkg/sentry/inet/test_stack.go b/pkg/sentry/inet/test_stack.go
index 9ebeba8a3..03e2608c2 100644
--- a/pkg/sentry/inet/test_stack.go
+++ b/pkg/sentry/inet/test_stack.go
@@ -164,3 +164,15 @@ func (s *TestStack) SetForwarding(protocol tcpip.NetworkProtocolNumber, enable b
s.IPForwarding = enable
return nil
}
+
+// PortRange implements inet.Stack.PortRange.
+func (*TestStack) PortRange() (uint16, uint16) {
+ // Use the default Linux values per net/ipv4/af_inet.c:inet_init_net().
+ return 32768, 60999
+}
+
+// SetPortRange implements inet.Stack.SetPortRange.
+func (*TestStack) SetPortRange(start uint16, end uint16) error {
+ // No-op.
+ return nil
+}
diff --git a/pkg/sentry/pgalloc/pgalloc.go b/pkg/sentry/pgalloc/pgalloc.go
index 58cc11a13..a4af3e21b 100644
--- a/pkg/sentry/pgalloc/pgalloc.go
+++ b/pkg/sentry/pgalloc/pgalloc.go
@@ -876,6 +876,7 @@ func (f *MemoryFile) UpdateUsage() error {
// in bs, sets committed[i] to 1 if the page is committed and 0 otherwise.
//
// Precondition: f.mu must be held; it may be unlocked and reacquired.
+// +checklocks:f.mu
func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(bs []byte, committed []byte) error) error {
// Track if anything changed to elide the merge. In the common case, we
// expect all segments to be committed and no merge to occur.
@@ -925,72 +926,73 @@ func (f *MemoryFile) updateUsageLocked(currentUsage uint64, checkCommitted func(
r := seg.Range()
var checkErr error
- err := f.forEachMappingSlice(r, func(s []byte) {
- if checkErr != nil {
- return
- }
-
- // Ensure that we have sufficient buffer for the call
- // (one byte per page). The length of each slice must
- // be page-aligned.
- bufLen := len(s) / usermem.PageSize
- if len(buf) < bufLen {
- buf = make([]byte, bufLen)
- }
+ err := f.forEachMappingSlice(r,
+ func(s []byte) {
+ if checkErr != nil {
+ return
+ }
- // Query for new pages in core.
- // NOTE(b/165896008): mincore (which is passed as checkCommitted)
- // by f.UpdateUsage() might take a really long time. So unlock f.mu
- // while checkCommitted runs.
- f.mu.Unlock()
- err := checkCommitted(s, buf)
- f.mu.Lock()
- if err != nil {
- checkErr = err
- return
- }
+ // Ensure that we have sufficient buffer for the call
+ // (one byte per page). The length of each slice must
+ // be page-aligned.
+ bufLen := len(s) / usermem.PageSize
+ if len(buf) < bufLen {
+ buf = make([]byte, bufLen)
+ }
- // Scan each page and switch out segments.
- seg := f.usage.LowerBoundSegment(r.Start)
- for i := 0; i < bufLen; {
- if buf[i]&0x1 == 0 {
- i++
- continue
+ // Query for new pages in core.
+ // NOTE(b/165896008): mincore (which is passed as checkCommitted)
+ // by f.UpdateUsage() might take a really long time. So unlock f.mu
+ // while checkCommitted runs.
+ f.mu.Unlock()
+ err := checkCommitted(s, buf)
+ f.mu.Lock()
+ if err != nil {
+ checkErr = err
+ return
}
- // Scan to the end of this committed range.
- j := i + 1
- for ; j < bufLen; j++ {
- if buf[j]&0x1 == 0 {
- break
+
+ // Scan each page and switch out segments.
+ seg := f.usage.LowerBoundSegment(r.Start)
+ for i := 0; i < bufLen; {
+ if buf[i]&0x1 == 0 {
+ i++
+ continue
}
- }
- committedFR := memmap.FileRange{
- Start: r.Start + uint64(i*usermem.PageSize),
- End: r.Start + uint64(j*usermem.PageSize),
- }
- // Advance seg to committedFR.Start.
- for seg.Ok() && seg.End() < committedFR.Start {
- seg = seg.NextSegment()
- }
- // Mark pages overlapping committedFR as committed.
- for seg.Ok() && seg.Start() < committedFR.End {
- if seg.ValuePtr().canCommit() {
- seg = f.usage.Isolate(seg, committedFR)
- seg.ValuePtr().knownCommitted = true
- amount := seg.Range().Length()
- usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
- f.usageExpected += amount
- changedAny = true
+ // Scan to the end of this committed range.
+ j := i + 1
+ for ; j < bufLen; j++ {
+ if buf[j]&0x1 == 0 {
+ break
+ }
}
- seg = seg.NextSegment()
+ committedFR := memmap.FileRange{
+ Start: r.Start + uint64(i*usermem.PageSize),
+ End: r.Start + uint64(j*usermem.PageSize),
+ }
+ // Advance seg to committedFR.Start.
+ for seg.Ok() && seg.End() < committedFR.Start {
+ seg = seg.NextSegment()
+ }
+ // Mark pages overlapping committedFR as committed.
+ for seg.Ok() && seg.Start() < committedFR.End {
+ if seg.ValuePtr().canCommit() {
+ seg = f.usage.Isolate(seg, committedFR)
+ seg.ValuePtr().knownCommitted = true
+ amount := seg.Range().Length()
+ usage.MemoryAccounting.Inc(amount, seg.ValuePtr().kind)
+ f.usageExpected += amount
+ changedAny = true
+ }
+ seg = seg.NextSegment()
+ }
+ // Continue scanning for committed pages.
+ i = j + 1
}
- // Continue scanning for committed pages.
- i = j + 1
- }
- // Advance r.Start.
- r.Start += uint64(len(s))
- })
+ // Advance r.Start.
+ r.Start += uint64(len(s))
+ })
if checkErr != nil {
return checkErr
}
diff --git a/pkg/sentry/socket/hostinet/stack.go b/pkg/sentry/socket/hostinet/stack.go
index e6323244c..5bcf92e14 100644
--- a/pkg/sentry/socket/hostinet/stack.go
+++ b/pkg/sentry/socket/hostinet/stack.go
@@ -504,3 +504,14 @@ func (s *Stack) Forwarding(protocol tcpip.NetworkProtocolNumber) bool {
func (s *Stack) SetForwarding(tcpip.NetworkProtocolNumber, bool) error {
return syserror.EACCES
}
+
+// PortRange implements inet.Stack.PortRange.
+func (*Stack) PortRange() (uint16, uint16) {
+ // Use the default Linux values per net/ipv4/af_inet.c:inet_init_net().
+ return 32768, 60999
+}
+
+// SetPortRange implements inet.Stack.SetPortRange.
+func (*Stack) SetPortRange(start uint16, end uint16) error {
+ return syserror.EACCES
+}
diff --git a/pkg/sentry/socket/netstack/netstack.go b/pkg/sentry/socket/netstack/netstack.go
index f2dc7c90b..9efb195f0 100644
--- a/pkg/sentry/socket/netstack/netstack.go
+++ b/pkg/sentry/socket/netstack/netstack.go
@@ -83,110 +83,121 @@ var Metrics = tcpip.Stats{
V4: tcpip.ICMPv4Stats{
PacketsSent: tcpip.ICMPv4SentPacketStats{
ICMPv4PacketStats: tcpip.ICMPv4PacketStats{
- Echo: mustCreateMetric("/netstack/icmp/v4/packets_sent/echo", "Total number of ICMPv4 echo packets sent by netstack."),
- EchoReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/echo_reply", "Total number of ICMPv4 echo reply packets sent by netstack."),
- DstUnreachable: mustCreateMetric("/netstack/icmp/v4/packets_sent/dst_unreachable", "Total number of ICMPv4 destination unreachable packets sent by netstack."),
- SrcQuench: mustCreateMetric("/netstack/icmp/v4/packets_sent/src_quench", "Total number of ICMPv4 source quench packets sent by netstack."),
- Redirect: mustCreateMetric("/netstack/icmp/v4/packets_sent/redirect", "Total number of ICMPv4 redirect packets sent by netstack."),
- TimeExceeded: mustCreateMetric("/netstack/icmp/v4/packets_sent/time_exceeded", "Total number of ICMPv4 time exceeded packets sent by netstack."),
- ParamProblem: mustCreateMetric("/netstack/icmp/v4/packets_sent/param_problem", "Total number of ICMPv4 parameter problem packets sent by netstack."),
- Timestamp: mustCreateMetric("/netstack/icmp/v4/packets_sent/timestamp", "Total number of ICMPv4 timestamp packets sent by netstack."),
- TimestampReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/timestamp_reply", "Total number of ICMPv4 timestamp reply packets sent by netstack."),
- InfoRequest: mustCreateMetric("/netstack/icmp/v4/packets_sent/info_request", "Total number of ICMPv4 information request packets sent by netstack."),
- InfoReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/info_reply", "Total number of ICMPv4 information reply packets sent by netstack."),
+ EchoRequest: mustCreateMetric("/netstack/icmp/v4/packets_sent/echo_request", "Number of ICMPv4 echo request packets sent by netstack."),
+ EchoReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/echo_reply", "Number of ICMPv4 echo reply packets sent by netstack."),
+ DstUnreachable: mustCreateMetric("/netstack/icmp/v4/packets_sent/dst_unreachable", "Number of ICMPv4 destination unreachable packets sent by netstack."),
+ SrcQuench: mustCreateMetric("/netstack/icmp/v4/packets_sent/src_quench", "Number of ICMPv4 source quench packets sent by netstack."),
+ Redirect: mustCreateMetric("/netstack/icmp/v4/packets_sent/redirect", "Number of ICMPv4 redirect packets sent by netstack."),
+ TimeExceeded: mustCreateMetric("/netstack/icmp/v4/packets_sent/time_exceeded", "Number of ICMPv4 time exceeded packets sent by netstack."),
+ ParamProblem: mustCreateMetric("/netstack/icmp/v4/packets_sent/param_problem", "Number of ICMPv4 parameter problem packets sent by netstack."),
+ Timestamp: mustCreateMetric("/netstack/icmp/v4/packets_sent/timestamp", "Number of ICMPv4 timestamp packets sent by netstack."),
+ TimestampReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/timestamp_reply", "Number of ICMPv4 timestamp reply packets sent by netstack."),
+ InfoRequest: mustCreateMetric("/netstack/icmp/v4/packets_sent/info_request", "Number of ICMPv4 information request packets sent by netstack."),
+ InfoReply: mustCreateMetric("/netstack/icmp/v4/packets_sent/info_reply", "Number of ICMPv4 information reply packets sent by netstack."),
},
- Dropped: mustCreateMetric("/netstack/icmp/v4/packets_sent/dropped", "Total number of ICMPv4 packets dropped by netstack due to link layer errors."),
+ Dropped: mustCreateMetric("/netstack/icmp/v4/packets_sent/dropped", "Number of ICMPv4 packets dropped by netstack due to link layer errors."),
+ RateLimited: mustCreateMetric("/netstack/icmp/v4/packets_sent/rate_limited", "Number of ICMPv4 packets dropped by netstack due to rate limit being exceeded."),
},
PacketsReceived: tcpip.ICMPv4ReceivedPacketStats{
ICMPv4PacketStats: tcpip.ICMPv4PacketStats{
- Echo: mustCreateMetric("/netstack/icmp/v4/packets_received/echo", "Total number of ICMPv4 echo packets received by netstack."),
- EchoReply: mustCreateMetric("/netstack/icmp/v4/packets_received/echo_reply", "Total number of ICMPv4 echo reply packets received by netstack."),
- DstUnreachable: mustCreateMetric("/netstack/icmp/v4/packets_received/dst_unreachable", "Total number of ICMPv4 destination unreachable packets received by netstack."),
- SrcQuench: mustCreateMetric("/netstack/icmp/v4/packets_received/src_quench", "Total number of ICMPv4 source quench packets received by netstack."),
- Redirect: mustCreateMetric("/netstack/icmp/v4/packets_received/redirect", "Total number of ICMPv4 redirect packets received by netstack."),
- TimeExceeded: mustCreateMetric("/netstack/icmp/v4/packets_received/time_exceeded", "Total number of ICMPv4 time exceeded packets received by netstack."),
- ParamProblem: mustCreateMetric("/netstack/icmp/v4/packets_received/param_problem", "Total number of ICMPv4 parameter problem packets received by netstack."),
- Timestamp: mustCreateMetric("/netstack/icmp/v4/packets_received/timestamp", "Total number of ICMPv4 timestamp packets received by netstack."),
- TimestampReply: mustCreateMetric("/netstack/icmp/v4/packets_received/timestamp_reply", "Total number of ICMPv4 timestamp reply packets received by netstack."),
- InfoRequest: mustCreateMetric("/netstack/icmp/v4/packets_received/info_request", "Total number of ICMPv4 information request packets received by netstack."),
- InfoReply: mustCreateMetric("/netstack/icmp/v4/packets_received/info_reply", "Total number of ICMPv4 information reply packets received by netstack."),
+ EchoRequest: mustCreateMetric("/netstack/icmp/v4/packets_received/echo_request", "Number of ICMPv4 echo request packets received by netstack."),
+ EchoReply: mustCreateMetric("/netstack/icmp/v4/packets_received/echo_reply", "Number of ICMPv4 echo reply packets received by netstack."),
+ DstUnreachable: mustCreateMetric("/netstack/icmp/v4/packets_received/dst_unreachable", "Number of ICMPv4 destination unreachable packets received by netstack."),
+ SrcQuench: mustCreateMetric("/netstack/icmp/v4/packets_received/src_quench", "Number of ICMPv4 source quench packets received by netstack."),
+ Redirect: mustCreateMetric("/netstack/icmp/v4/packets_received/redirect", "Number of ICMPv4 redirect packets received by netstack."),
+ TimeExceeded: mustCreateMetric("/netstack/icmp/v4/packets_received/time_exceeded", "Number of ICMPv4 time exceeded packets received by netstack."),
+ ParamProblem: mustCreateMetric("/netstack/icmp/v4/packets_received/param_problem", "Number of ICMPv4 parameter problem packets received by netstack."),
+ Timestamp: mustCreateMetric("/netstack/icmp/v4/packets_received/timestamp", "Number of ICMPv4 timestamp packets received by netstack."),
+ TimestampReply: mustCreateMetric("/netstack/icmp/v4/packets_received/timestamp_reply", "Number of ICMPv4 timestamp reply packets received by netstack."),
+ InfoRequest: mustCreateMetric("/netstack/icmp/v4/packets_received/info_request", "Number of ICMPv4 information request packets received by netstack."),
+ InfoReply: mustCreateMetric("/netstack/icmp/v4/packets_received/info_reply", "Number of ICMPv4 information reply packets received by netstack."),
},
- Invalid: mustCreateMetric("/netstack/icmp/v4/packets_received/invalid", "Total number of ICMPv4 packets received that the transport layer could not parse."),
+ Invalid: mustCreateMetric("/netstack/icmp/v4/packets_received/invalid", "Number of ICMPv4 packets received that the transport layer could not parse."),
},
},
V6: tcpip.ICMPv6Stats{
PacketsSent: tcpip.ICMPv6SentPacketStats{
ICMPv6PacketStats: tcpip.ICMPv6PacketStats{
- EchoRequest: mustCreateMetric("/netstack/icmp/v6/packets_sent/echo_request", "Total number of ICMPv6 echo request packets sent by netstack."),
- EchoReply: mustCreateMetric("/netstack/icmp/v6/packets_sent/echo_reply", "Total number of ICMPv6 echo reply packets sent by netstack."),
- DstUnreachable: mustCreateMetric("/netstack/icmp/v6/packets_sent/dst_unreachable", "Total number of ICMPv6 destination unreachable packets sent by netstack."),
- PacketTooBig: mustCreateMetric("/netstack/icmp/v6/packets_sent/packet_too_big", "Total number of ICMPv6 packet too big packets sent by netstack."),
- TimeExceeded: mustCreateMetric("/netstack/icmp/v6/packets_sent/time_exceeded", "Total number of ICMPv6 time exceeded packets sent by netstack."),
- ParamProblem: mustCreateMetric("/netstack/icmp/v6/packets_sent/param_problem", "Total number of ICMPv6 parameter problem packets sent by netstack."),
- RouterSolicit: mustCreateMetric("/netstack/icmp/v6/packets_sent/router_solicit", "Total number of ICMPv6 router solicit packets sent by netstack."),
- RouterAdvert: mustCreateMetric("/netstack/icmp/v6/packets_sent/router_advert", "Total number of ICMPv6 router advert packets sent by netstack."),
- NeighborSolicit: mustCreateMetric("/netstack/icmp/v6/packets_sent/neighbor_solicit", "Total number of ICMPv6 neighbor solicit packets sent by netstack."),
- NeighborAdvert: mustCreateMetric("/netstack/icmp/v6/packets_sent/neighbor_advert", "Total number of ICMPv6 neighbor advert packets sent by netstack."),
- RedirectMsg: mustCreateMetric("/netstack/icmp/v6/packets_sent/redirect_msg", "Total number of ICMPv6 redirect message packets sent by netstack."),
+ EchoRequest: mustCreateMetric("/netstack/icmp/v6/packets_sent/echo_request", "Number of ICMPv6 echo request packets sent by netstack."),
+ EchoReply: mustCreateMetric("/netstack/icmp/v6/packets_sent/echo_reply", "Number of ICMPv6 echo reply packets sent by netstack."),
+ DstUnreachable: mustCreateMetric("/netstack/icmp/v6/packets_sent/dst_unreachable", "Number of ICMPv6 destination unreachable packets sent by netstack."),
+ PacketTooBig: mustCreateMetric("/netstack/icmp/v6/packets_sent/packet_too_big", "Number of ICMPv6 packet too big packets sent by netstack."),
+ TimeExceeded: mustCreateMetric("/netstack/icmp/v6/packets_sent/time_exceeded", "Number of ICMPv6 time exceeded packets sent by netstack."),
+ ParamProblem: mustCreateMetric("/netstack/icmp/v6/packets_sent/param_problem", "Number of ICMPv6 parameter problem packets sent by netstack."),
+ RouterSolicit: mustCreateMetric("/netstack/icmp/v6/packets_sent/router_solicit", "Number of ICMPv6 router solicit packets sent by netstack."),
+ RouterAdvert: mustCreateMetric("/netstack/icmp/v6/packets_sent/router_advert", "Number of ICMPv6 router advert packets sent by netstack."),
+ NeighborSolicit: mustCreateMetric("/netstack/icmp/v6/packets_sent/neighbor_solicit", "Number of ICMPv6 neighbor solicit packets sent by netstack."),
+ NeighborAdvert: mustCreateMetric("/netstack/icmp/v6/packets_sent/neighbor_advert", "Number of ICMPv6 neighbor advert packets sent by netstack."),
+ RedirectMsg: mustCreateMetric("/netstack/icmp/v6/packets_sent/redirect_msg", "Number of ICMPv6 redirect message packets sent by netstack."),
+ MulticastListenerQuery: mustCreateMetric("/netstack/icmp/v6/packets_sent/multicast_listener_query", "Number of ICMPv6 multicast listener query packets sent by netstack."),
+ MulticastListenerReport: mustCreateMetric("/netstack/icmp/v6/packets_sent/multicast_listener_report", "Number of ICMPv6 multicast listener report packets sent by netstack."),
+ MulticastListenerDone: mustCreateMetric("/netstack/icmp/v6/packets_sent/multicast_listener_done", "Number of ICMPv6 multicast listener done packets sent by netstack."),
},
- Dropped: mustCreateMetric("/netstack/icmp/v6/packets_sent/dropped", "Total number of ICMPv6 packets dropped by netstack due to link layer errors."),
+ Dropped: mustCreateMetric("/netstack/icmp/v6/packets_sent/dropped", "Number of ICMPv6 packets dropped by netstack due to link layer errors."),
+ RateLimited: mustCreateMetric("/netstack/icmp/v6/packets_sent/rate_limited", "Number of ICMPv6 packets dropped by netstack due to rate limit being exceeded."),
},
PacketsReceived: tcpip.ICMPv6ReceivedPacketStats{
ICMPv6PacketStats: tcpip.ICMPv6PacketStats{
- EchoRequest: mustCreateMetric("/netstack/icmp/v6/packets_received/echo_request", "Total number of ICMPv6 echo request packets received by netstack."),
- EchoReply: mustCreateMetric("/netstack/icmp/v6/packets_received/echo_reply", "Total number of ICMPv6 echo reply packets received by netstack."),
- DstUnreachable: mustCreateMetric("/netstack/icmp/v6/packets_received/dst_unreachable", "Total number of ICMPv6 destination unreachable packets received by netstack."),
- PacketTooBig: mustCreateMetric("/netstack/icmp/v6/packets_received/packet_too_big", "Total number of ICMPv6 packet too big packets received by netstack."),
- TimeExceeded: mustCreateMetric("/netstack/icmp/v6/packets_received/time_exceeded", "Total number of ICMPv6 time exceeded packets received by netstack."),
- ParamProblem: mustCreateMetric("/netstack/icmp/v6/packets_received/param_problem", "Total number of ICMPv6 parameter problem packets received by netstack."),
- RouterSolicit: mustCreateMetric("/netstack/icmp/v6/packets_received/router_solicit", "Total number of ICMPv6 router solicit packets received by netstack."),
- RouterAdvert: mustCreateMetric("/netstack/icmp/v6/packets_received/router_advert", "Total number of ICMPv6 router advert packets received by netstack."),
- NeighborSolicit: mustCreateMetric("/netstack/icmp/v6/packets_received/neighbor_solicit", "Total number of ICMPv6 neighbor solicit packets received by netstack."),
- NeighborAdvert: mustCreateMetric("/netstack/icmp/v6/packets_received/neighbor_advert", "Total number of ICMPv6 neighbor advert packets received by netstack."),
- RedirectMsg: mustCreateMetric("/netstack/icmp/v6/packets_received/redirect_msg", "Total number of ICMPv6 redirect message packets received by netstack."),
+ EchoRequest: mustCreateMetric("/netstack/icmp/v6/packets_received/echo_request", "Number of ICMPv6 echo request packets received by netstack."),
+ EchoReply: mustCreateMetric("/netstack/icmp/v6/packets_received/echo_reply", "Number of ICMPv6 echo reply packets received by netstack."),
+ DstUnreachable: mustCreateMetric("/netstack/icmp/v6/packets_received/dst_unreachable", "Number of ICMPv6 destination unreachable packets received by netstack."),
+ PacketTooBig: mustCreateMetric("/netstack/icmp/v6/packets_received/packet_too_big", "Number of ICMPv6 packet too big packets received by netstack."),
+ TimeExceeded: mustCreateMetric("/netstack/icmp/v6/packets_received/time_exceeded", "Number of ICMPv6 time exceeded packets received by netstack."),
+ ParamProblem: mustCreateMetric("/netstack/icmp/v6/packets_received/param_problem", "Number of ICMPv6 parameter problem packets received by netstack."),
+ RouterSolicit: mustCreateMetric("/netstack/icmp/v6/packets_received/router_solicit", "Number of ICMPv6 router solicit packets received by netstack."),
+ RouterAdvert: mustCreateMetric("/netstack/icmp/v6/packets_received/router_advert", "Number of ICMPv6 router advert packets received by netstack."),
+ NeighborSolicit: mustCreateMetric("/netstack/icmp/v6/packets_received/neighbor_solicit", "Number of ICMPv6 neighbor solicit packets received by netstack."),
+ NeighborAdvert: mustCreateMetric("/netstack/icmp/v6/packets_received/neighbor_advert", "Number of ICMPv6 neighbor advert packets received by netstack."),
+ RedirectMsg: mustCreateMetric("/netstack/icmp/v6/packets_received/redirect_msg", "Number of ICMPv6 redirect message packets received by netstack."),
+ MulticastListenerQuery: mustCreateMetric("/netstack/icmp/v6/packets_received/multicast_listener_query", "Number of ICMPv6 multicast listener query packets received by netstack."),
+ MulticastListenerReport: mustCreateMetric("/netstack/icmp/v6/packets_received/multicast_listener_report", "Number of ICMPv6 multicast listener report packets received by netstack."),
+ MulticastListenerDone: mustCreateMetric("/netstack/icmp/v6/packets_received/multicast_listener_done", "Number of ICMPv6 multicast listener done packets received by netstack."),
},
- Invalid: mustCreateMetric("/netstack/icmp/v6/packets_received/invalid", "Total number of ICMPv6 packets received that the transport layer could not parse."),
+ Unrecognized: mustCreateMetric("/netstack/icmp/v6/packets_received/unrecognized", "Number of ICMPv6 packets received that the transport layer does not know how to parse."),
+ Invalid: mustCreateMetric("/netstack/icmp/v6/packets_received/invalid", "Number of ICMPv6 packets received that the transport layer could not parse."),
+ RouterOnlyPacketsDroppedByHost: mustCreateMetric("/netstack/icmp/v6/packets_received/router_only_packets_dropped_by_host", "Number of ICMPv6 packets dropped due to being router-specific packets."),
},
},
},
IGMP: tcpip.IGMPStats{
PacketsSent: tcpip.IGMPSentPacketStats{
IGMPPacketStats: tcpip.IGMPPacketStats{
- MembershipQuery: mustCreateMetric("/netstack/igmp/packets_sent/membership_query", "Total number of IGMP Membership Query messages sent by netstack."),
- V1MembershipReport: mustCreateMetric("/netstack/igmp/packets_sent/v1_membership_report", "Total number of IGMPv1 Membership Report messages sent by netstack."),
- V2MembershipReport: mustCreateMetric("/netstack/igmp/packets_sent/v2_membership_report", "Total number of IGMPv2 Membership Report messages sent by netstack."),
- LeaveGroup: mustCreateMetric("/netstack/igmp/packets_sent/leave_group", "Total number of IGMP Leave Group messages sent by netstack."),
+ MembershipQuery: mustCreateMetric("/netstack/igmp/packets_sent/membership_query", "Number of IGMP Membership Query messages sent by netstack."),
+ V1MembershipReport: mustCreateMetric("/netstack/igmp/packets_sent/v1_membership_report", "Number of IGMPv1 Membership Report messages sent by netstack."),
+ V2MembershipReport: mustCreateMetric("/netstack/igmp/packets_sent/v2_membership_report", "Number of IGMPv2 Membership Report messages sent by netstack."),
+ LeaveGroup: mustCreateMetric("/netstack/igmp/packets_sent/leave_group", "Number of IGMP Leave Group messages sent by netstack."),
},
- Dropped: mustCreateMetric("/netstack/igmp/packets_sent/dropped", "Total number of IGMP packets dropped by netstack due to link layer errors."),
+ Dropped: mustCreateMetric("/netstack/igmp/packets_sent/dropped", "Number of IGMP packets dropped by netstack due to link layer errors."),
},
PacketsReceived: tcpip.IGMPReceivedPacketStats{
IGMPPacketStats: tcpip.IGMPPacketStats{
- MembershipQuery: mustCreateMetric("/netstack/igmp/packets_received/membership_query", "Total number of IGMP Membership Query messages received by netstack."),
- V1MembershipReport: mustCreateMetric("/netstack/igmp/packets_received/v1_membership_report", "Total number of IGMPv1 Membership Report messages received by netstack."),
- V2MembershipReport: mustCreateMetric("/netstack/igmp/packets_received/v2_membership_report", "Total number of IGMPv2 Membership Report messages received by netstack."),
- LeaveGroup: mustCreateMetric("/netstack/igmp/packets_received/leave_group", "Total number of IGMP Leave Group messages received by netstack."),
+ MembershipQuery: mustCreateMetric("/netstack/igmp/packets_received/membership_query", "Number of IGMP Membership Query messages received by netstack."),
+ V1MembershipReport: mustCreateMetric("/netstack/igmp/packets_received/v1_membership_report", "Number of IGMPv1 Membership Report messages received by netstack."),
+ V2MembershipReport: mustCreateMetric("/netstack/igmp/packets_received/v2_membership_report", "Number of IGMPv2 Membership Report messages received by netstack."),
+ LeaveGroup: mustCreateMetric("/netstack/igmp/packets_received/leave_group", "Number of IGMP Leave Group messages received by netstack."),
},
- Invalid: mustCreateMetric("/netstack/igmp/packets_received/invalid", "Total number of IGMP packets received by netstack that could not be parsed."),
- ChecksumErrors: mustCreateMetric("/netstack/igmp/packets_received/checksum_errors", "Total number of received IGMP packets with bad checksums."),
- Unrecognized: mustCreateMetric("/netstack/igmp/packets_received/unrecognized", "Total number of unrecognized IGMP packets received by netstack."),
+ Invalid: mustCreateMetric("/netstack/igmp/packets_received/invalid", "Number of IGMP packets received by netstack that could not be parsed."),
+ ChecksumErrors: mustCreateMetric("/netstack/igmp/packets_received/checksum_errors", "Number of received IGMP packets with bad checksums."),
+ Unrecognized: mustCreateMetric("/netstack/igmp/packets_received/unrecognized", "Number of unrecognized IGMP packets received by netstack."),
},
},
IP: tcpip.IPStats{
- PacketsReceived: mustCreateMetric("/netstack/ip/packets_received", "Total number of IP packets received from the link layer in nic.DeliverNetworkPacket."),
- InvalidDestinationAddressesReceived: mustCreateMetric("/netstack/ip/invalid_addresses_received", "Total number of IP packets received with an unknown or invalid destination address."),
- InvalidSourceAddressesReceived: mustCreateMetric("/netstack/ip/invalid_source_addresses_received", "Total number of IP packets received with an unknown or invalid source address."),
- PacketsDelivered: mustCreateMetric("/netstack/ip/packets_delivered", "Total number of incoming IP packets that are successfully delivered to the transport layer via HandlePacket."),
- PacketsSent: mustCreateMetric("/netstack/ip/packets_sent", "Total number of IP packets sent via WritePacket."),
- OutgoingPacketErrors: mustCreateMetric("/netstack/ip/outgoing_packet_errors", "Total number of IP packets which failed to write to a link-layer endpoint."),
- MalformedPacketsReceived: mustCreateMetric("/netstack/ip/malformed_packets_received", "Total number of IP packets which failed IP header validation checks."),
- MalformedFragmentsReceived: mustCreateMetric("/netstack/ip/malformed_fragments_received", "Total number of IP fragments which failed IP fragment validation checks."),
- IPTablesPreroutingDropped: mustCreateMetric("/netstack/ip/iptables/prerouting_dropped", "Total number of IP packets dropped in the Prerouting chain."),
- IPTablesInputDropped: mustCreateMetric("/netstack/ip/iptables/input_dropped", "Total number of IP packets dropped in the Input chain."),
- IPTablesOutputDropped: mustCreateMetric("/netstack/ip/iptables/output_dropped", "Total number of IP packets dropped in the Output chain."),
- OptionTimestampReceived: mustCreateMetric("/netstack/ip/options/timestamp_received", "Total number of timestamp options found in received IP packets."),
- OptionRecordRouteReceived: mustCreateMetric("/netstack/ip/options/record_route_received", "Total number of record route options found in received IP packets."),
- OptionRouterAlertReceived: mustCreateMetric("/netstack/ip/options/router_alert_received", "Total number of router alert options found in received IP packets."),
- OptionUnknownReceived: mustCreateMetric("/netstack/ip/options/unknown_received", "Total number of unknown options found in received IP packets."),
+ PacketsReceived: mustCreateMetric("/netstack/ip/packets_received", "Number of IP packets received from the link layer in nic.DeliverNetworkPacket."),
+ DisabledPacketsReceived: mustCreateMetric("/netstack/ip/disabled_packets_received", "Number of IP packets received from the link layer when the IP layer is disabled."),
+ InvalidDestinationAddressesReceived: mustCreateMetric("/netstack/ip/invalid_addresses_received", "Number of IP packets received with an unknown or invalid destination address."),
+ InvalidSourceAddressesReceived: mustCreateMetric("/netstack/ip/invalid_source_addresses_received", "Number of IP packets received with an unknown or invalid source address."),
+ PacketsDelivered: mustCreateMetric("/netstack/ip/packets_delivered", "Number of incoming IP packets that are successfully delivered to the transport layer via HandlePacket."),
+ PacketsSent: mustCreateMetric("/netstack/ip/packets_sent", "Number of IP packets sent via WritePacket."),
+ OutgoingPacketErrors: mustCreateMetric("/netstack/ip/outgoing_packet_errors", "Number of IP packets which failed to write to a link-layer endpoint."),
+ MalformedPacketsReceived: mustCreateMetric("/netstack/ip/malformed_packets_received", "Number of IP packets which failed IP header validation checks."),
+ MalformedFragmentsReceived: mustCreateMetric("/netstack/ip/malformed_fragments_received", "Number of IP fragments which failed IP fragment validation checks."),
+ IPTablesPreroutingDropped: mustCreateMetric("/netstack/ip/iptables/prerouting_dropped", "Number of IP packets dropped in the Prerouting chain."),
+ IPTablesInputDropped: mustCreateMetric("/netstack/ip/iptables/input_dropped", "Number of IP packets dropped in the Input chain."),
+ IPTablesOutputDropped: mustCreateMetric("/netstack/ip/iptables/output_dropped", "Number of IP packets dropped in the Output chain."),
+ OptionTimestampReceived: mustCreateMetric("/netstack/ip/options/timestamp_received", "Number of timestamp options found in received IP packets."),
+ OptionRecordRouteReceived: mustCreateMetric("/netstack/ip/options/record_route_received", "Number of record route options found in received IP packets."),
+ OptionRouterAlertReceived: mustCreateMetric("/netstack/ip/options/router_alert_received", "Number of router alert options found in received IP packets."),
+ OptionUnknownReceived: mustCreateMetric("/netstack/ip/options/unknown_received", "Number of unknown options found in received IP packets."),
},
ARP: tcpip.ARPStats{
PacketsReceived: mustCreateMetric("/netstack/arp/packets_received", "Number of ARP packets received from the link layer."),
diff --git a/pkg/sentry/socket/netstack/stack.go b/pkg/sentry/socket/netstack/stack.go
index cc0fadeb5..b215067cf 100644
--- a/pkg/sentry/socket/netstack/stack.go
+++ b/pkg/sentry/socket/netstack/stack.go
@@ -336,7 +336,7 @@ func (s *Stack) Statistics(stat interface{}, arg string) error {
in.ParamProblem.Value(), // InParmProbs.
in.SrcQuench.Value(), // InSrcQuenchs.
in.Redirect.Value(), // InRedirects.
- in.Echo.Value(), // InEchos.
+ in.EchoRequest.Value(), // InEchos.
in.EchoReply.Value(), // InEchoReps.
in.Timestamp.Value(), // InTimestamps.
in.TimestampReply.Value(), // InTimestampReps.
@@ -349,7 +349,7 @@ func (s *Stack) Statistics(stat interface{}, arg string) error {
out.ParamProblem.Value(), // OutParmProbs.
out.SrcQuench.Value(), // OutSrcQuenchs.
out.Redirect.Value(), // OutRedirects.
- out.Echo.Value(), // OutEchos.
+ out.EchoRequest.Value(), // OutEchos.
out.EchoReply.Value(), // OutEchoReps.
out.Timestamp.Value(), // OutTimestamps.
out.TimestampReply.Value(), // OutTimestampReps.
@@ -478,3 +478,13 @@ func (s *Stack) SetForwarding(protocol tcpip.NetworkProtocolNumber, enable bool)
}
return nil
}
+
+// PortRange implements inet.Stack.PortRange.
+func (s *Stack) PortRange() (uint16, uint16) {
+ return s.Stack.PortRange()
+}
+
+// SetPortRange implements inet.Stack.SetPortRange.
+func (s *Stack) SetPortRange(start uint16, end uint16) error {
+ return syserr.TranslateNetstackError(s.Stack.SetPortRange(start, end)).ToError()
+}
diff --git a/pkg/sentry/vfs/anonfs.go b/pkg/sentry/vfs/anonfs.go
index 7ad0eaf86..3caf417ca 100644
--- a/pkg/sentry/vfs/anonfs.go
+++ b/pkg/sentry/vfs/anonfs.go
@@ -291,6 +291,11 @@ func (fs *anonFilesystem) PrependPath(ctx context.Context, vfsroot, vd VirtualDe
return PrependPathSyntheticError{}
}
+// MountOptions implements FilesystemImpl.MountOptions.
+func (fs *anonFilesystem) MountOptions() string {
+ return ""
+}
+
// IncRef implements DentryImpl.IncRef.
func (d *anonDentry) IncRef() {
// no-op
diff --git a/pkg/sentry/vfs/dentry.go b/pkg/sentry/vfs/dentry.go
index 320ab7ce1..e7ca24d96 100644
--- a/pkg/sentry/vfs/dentry.go
+++ b/pkg/sentry/vfs/dentry.go
@@ -211,12 +211,14 @@ func (vfs *VirtualFilesystem) PrepareDeleteDentry(mntns *MountNamespace, d *Dent
// AbortDeleteDentry must be called after PrepareDeleteDentry if the deletion
// fails.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) AbortDeleteDentry(d *Dentry) {
d.mu.Unlock()
}
// CommitDeleteDentry must be called after PrepareDeleteDentry if the deletion
// succeeds.
+// +checklocks:d.mu
func (vfs *VirtualFilesystem) CommitDeleteDentry(ctx context.Context, d *Dentry) {
d.dead = true
d.mu.Unlock()
@@ -270,6 +272,8 @@ func (vfs *VirtualFilesystem) PrepareRenameDentry(mntns *MountNamespace, from, t
// AbortRenameDentry must be called after PrepareRenameDentry if the rename
// fails.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -282,6 +286,8 @@ func (vfs *VirtualFilesystem) AbortRenameDentry(from, to *Dentry) {
// that was replaced by from.
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, from, to *Dentry) {
from.mu.Unlock()
if to != nil {
@@ -297,6 +303,8 @@ func (vfs *VirtualFilesystem) CommitRenameReplaceDentry(ctx context.Context, fro
// from and to are exchanged by rename(RENAME_EXCHANGE).
//
// Preconditions: PrepareRenameDentry was previously called on from and to.
+// +checklocks:from.mu
+// +checklocks:to.mu
func (vfs *VirtualFilesystem) CommitRenameExchangeDentry(from, to *Dentry) {
from.mu.Unlock()
to.mu.Unlock()
diff --git a/pkg/sentry/vfs/filesystem.go b/pkg/sentry/vfs/filesystem.go
index 2c4b81e78..059939010 100644
--- a/pkg/sentry/vfs/filesystem.go
+++ b/pkg/sentry/vfs/filesystem.go
@@ -502,6 +502,15 @@ type FilesystemImpl interface {
//
// Preconditions: vd.Mount().Filesystem().Impl() == this FilesystemImpl.
PrependPath(ctx context.Context, vfsroot, vd VirtualDentry, b *fspath.Builder) error
+
+ // MountOptions returns mount options for the current filesystem. This
+ // should only return options specific to the filesystem (i.e. it should not
+ // return generic flags such as "ro" or "rw"). Options should be returned as
+ // a comma-separated string, similar to the data (5th) argument to mount(2).
+ //
+ // If the implementation has no filesystem-specific options, it should
+ // return the empty string.
+ MountOptions() string
}
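
As a purely hypothetical illustration of this contract (none of the names below exist in gVisor), an implementation with two filesystem-specific options might format them like so:

    package main

    import "fmt"

    // exampleFilesystem is a made-up type used only to illustrate the
    // MountOptions contract: return fs-specific options as a comma-separated
    // string and omit generic flags such as "ro"/"rw".
    type exampleFilesystem struct {
    	maxBytes uint64
    	rootMode uint32
    }

    func (fs *exampleFilesystem) MountOptions() string {
    	return fmt.Sprintf("size=%d,mode=%o", fs.maxBytes, fs.rootMode)
    }

    func main() {
    	fs := &exampleFilesystem{maxBytes: 1 << 20, rootMode: 01777}
    	fmt.Println(fs.MountOptions()) // size=1048576,mode=1777
    }
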
// PrependPathAtVFSRootError is returned by implementations of
diff --git a/pkg/sentry/vfs/mount.go b/pkg/sentry/vfs/mount.go
index 7063066ff..922f9e697 100644
--- a/pkg/sentry/vfs/mount.go
+++ b/pkg/sentry/vfs/mount.go
@@ -217,20 +217,21 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
return err
}
vfs.mountMu.Lock()
- vd.dentry.mu.Lock()
+ vdDentry := vd.dentry
+ vdDentry.mu.Lock()
for {
- if vd.dentry.dead {
- vd.dentry.mu.Unlock()
+ if vdDentry.dead {
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
vd.DecRef(ctx)
return syserror.ENOENT
}
// vd might have been mounted over between vfs.GetDentryAt() and
// vfs.mountMu.Lock().
- if !vd.dentry.isMounted() {
+ if !vdDentry.isMounted() {
break
}
- nextmnt := vfs.mounts.Lookup(vd.mount, vd.dentry)
+ nextmnt := vfs.mounts.Lookup(vd.mount, vdDentry)
if nextmnt == nil {
break
}
@@ -243,13 +244,13 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
}
// This can't fail since we're holding vfs.mountMu.
nextmnt.root.IncRef()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vd.DecRef(ctx)
vd = VirtualDentry{
mount: nextmnt,
dentry: nextmnt.root,
}
- vd.dentry.mu.Lock()
+ vdDentry = vd.dentry
+ vdDentry.mu.Lock()
}
// TODO(gvisor.dev/issue/1035): Linux requires that either both the mount
// point and the mount root are directories, or neither are, and returns
@@ -258,7 +259,7 @@ func (vfs *VirtualFilesystem) ConnectMountAt(ctx context.Context, creds *auth.Cr
vfs.mounts.seq.BeginWrite()
vfs.connectLocked(mnt, vd, mntns)
vfs.mounts.seq.EndWrite()
- vd.dentry.mu.Unlock()
+ vdDentry.mu.Unlock()
vfs.mountMu.Unlock()
return nil
}
@@ -958,13 +959,17 @@ func manglePath(p string) string {
// superBlockOpts returns the super block options string for the mount at
// the given path.
func superBlockOpts(mountPath string, mnt *Mount) string {
- // gVisor doesn't (yet) have a concept of super block options, so we
- // use the ro/rw bit from the mount flag.
+ // Compose super block options by combining global mount flags with
+ // FS-specific mount options.
opts := "rw"
if mnt.ReadOnly() {
opts = "ro"
}
+ if mopts := mnt.fs.Impl().MountOptions(); mopts != "" {
+ opts += "," + mopts
+ }
+
// NOTE(b/147673608): If the mount is a cgroup, we also need to include
// the cgroup name in the options. For now we just read that from the
// path.
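
The resulting options string is just the ro/rw bit followed by any filesystem-specific options. A small sketch of that composition with assumed inputs (not the actual vfs code):

    package main

    import "fmt"

    // composeOpts shows how superBlockOpts combines the mount's read-only bit
    // with the string returned by FilesystemImpl.MountOptions.
    func composeOpts(readOnly bool, fsOpts string) string {
    	opts := "rw"
    	if readOnly {
    		opts = "ro"
    	}
    	if fsOpts != "" {
    		opts += "," + fsOpts
    	}
    	return opts
    }

    func main() {
    	fmt.Println(composeOpts(false, "mode=0700")) // rw,mode=0700
    	fmt.Println(composeOpts(true, ""))           // ro
    }
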
diff --git a/pkg/sync/mutex_unsafe.go b/pkg/sync/mutex_unsafe.go
index 4e49a9b89..411a80a8a 100644
--- a/pkg/sync/mutex_unsafe.go
+++ b/pkg/sync/mutex_unsafe.go
@@ -72,6 +72,7 @@ func (m *Mutex) Lock() {
// Preconditions:
// * m is locked.
// * m was locked by this goroutine.
+// +checklocksignore
func (m *Mutex) Unlock() {
noteUnlock(unsafe.Pointer(m))
m.m.Unlock()
diff --git a/pkg/sync/rwmutex_unsafe.go b/pkg/sync/rwmutex_unsafe.go
index 4cf3fcd6e..892d3e641 100644
--- a/pkg/sync/rwmutex_unsafe.go
+++ b/pkg/sync/rwmutex_unsafe.go
@@ -105,6 +105,7 @@ func (rw *CrossGoroutineRWMutex) RUnlock() {
// TryLock locks rw for writing. It returns true if it succeeds and false
// otherwise. It does not block.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) TryLock() bool {
if RaceEnabled {
RaceDisable()
@@ -155,6 +156,7 @@ func (rw *CrossGoroutineRWMutex) Lock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) Unlock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.writerSem))
@@ -181,6 +183,7 @@ func (rw *CrossGoroutineRWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *CrossGoroutineRWMutex) DowngradeLock() {
if RaceEnabled {
RaceRelease(unsafe.Pointer(&rw.readerSem))
@@ -250,6 +253,7 @@ func (rw *RWMutex) RLock() {
// Preconditions:
// * rw is locked for reading.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) RUnlock() {
rw.m.RUnlock()
noteUnlock(unsafe.Pointer(rw))
@@ -279,6 +283,7 @@ func (rw *RWMutex) Lock() {
// Preconditions:
// * rw is locked for writing.
// * rw was locked by this goroutine.
+// +checklocksignore
func (rw *RWMutex) Unlock() {
rw.m.Unlock()
noteUnlock(unsafe.Pointer(rw))
@@ -288,6 +293,7 @@ func (rw *RWMutex) Unlock() {
//
// Preconditions:
// * rw is locked for writing.
+// +checklocksignore
func (rw *RWMutex) DowngradeLock() {
// No note change for DowngradeLock.
rw.m.DowngradeLock()
diff --git a/pkg/syserr/netstack.go b/pkg/syserr/netstack.go
index 0b9139570..79e564de6 100644
--- a/pkg/syserr/netstack.go
+++ b/pkg/syserr/netstack.go
@@ -51,6 +51,7 @@ var (
ErrNotPermittedNet = New((&tcpip.ErrNotPermitted{}).String(), linux.EPERM)
ErrBadBuffer = New((&tcpip.ErrBadBuffer{}).String(), linux.EFAULT)
ErrMalformedHeader = New((&tcpip.ErrMalformedHeader{}).String(), linux.EINVAL)
+ ErrInvalidPortRange = New((&tcpip.ErrInvalidPortRange{}).String(), linux.EINVAL)
)
// TranslateNetstackError converts an error from the tcpip package to a sentry
@@ -135,6 +136,8 @@ func TranslateNetstackError(err tcpip.Error) *Error {
return ErrBadBuffer
case *tcpip.ErrMalformedHeader:
return ErrMalformedHeader
+ case *tcpip.ErrInvalidPortRange:
+ return ErrInvalidPortRange
default:
panic(fmt.Sprintf("unknown error %T", err))
}
diff --git a/pkg/tcpip/buffer/view.go b/pkg/tcpip/buffer/view.go
index b05e81526..f4a30effd 100644
--- a/pkg/tcpip/buffer/view.go
+++ b/pkg/tcpip/buffer/view.go
@@ -196,7 +196,7 @@ func (vv *VectorisedView) CapLength(length int) {
// If the buffer argument is large enough to contain all the Views of this
// VectorisedView, the method will avoid allocations and use the buffer to
// store the Views of the clone.
-func (vv *VectorisedView) Clone(buffer []View) VectorisedView {
+func (vv VectorisedView) Clone(buffer []View) VectorisedView {
return VectorisedView{views: append(buffer[:0], vv.views...), size: vv.size}
}
@@ -290,6 +290,14 @@ func (vv *VectorisedView) AppendView(v View) {
vv.size += len(v)
}
+// AppendViews appends views to vv.
+func (vv *VectorisedView) AppendViews(views []View) {
+ vv.views = append(vv.views, views...)
+ for _, v := range views {
+ vv.size += len(v)
+ }
+}
+
// Readers returns a bytes.Reader for each of vv's views.
func (vv *VectorisedView) Readers() []bytes.Reader {
readers := make([]bytes.Reader, 0, len(vv.views))
diff --git a/pkg/tcpip/buffer/view_test.go b/pkg/tcpip/buffer/view_test.go
index 78b2faa26..d296d9c2b 100644
--- a/pkg/tcpip/buffer/view_test.go
+++ b/pkg/tcpip/buffer/view_test.go
@@ -45,6 +45,11 @@ func vv(size int, pieces ...string) buffer.VectorisedView {
return buffer.NewVectorisedView(size, views)
}
+// v returns a buffer.View containing piece.
+func v(piece string) buffer.View {
+ return buffer.View(piece)
+}
+
var capLengthTestCases = []struct {
comment string
in buffer.VectorisedView
@@ -125,6 +130,12 @@ var trimFrontTestCases = []struct {
want: vv(1, "3"),
},
{
+ comment: "Case with one empty View",
+ in: vv(3, "1", "", "23"),
+ count: 2,
+ want: vv(1, "3"),
+ },
+ {
comment: "Corner case with negative count",
in: vv(1, "1"),
count: -1,
@@ -566,11 +577,11 @@ func TestAppendView(t *testing.T) {
in buffer.View
want buffer.VectorisedView
}{
- {buffer.VectorisedView{}, nil, buffer.VectorisedView{}},
- {buffer.VectorisedView{}, buffer.View{}, buffer.VectorisedView{}},
- {buffer.NewVectorisedView(4, []buffer.View{{'a', 'b', 'c', 'd'}}), nil, buffer.NewVectorisedView(4, []buffer.View{{'a', 'b', 'c', 'd'}})},
- {buffer.NewVectorisedView(4, []buffer.View{{'a', 'b', 'c', 'd'}}), buffer.View{}, buffer.NewVectorisedView(4, []buffer.View{{'a', 'b', 'c', 'd'}})},
- {buffer.NewVectorisedView(4, []buffer.View{{'a', 'b', 'c', 'd'}}), buffer.View{'e'}, buffer.NewVectorisedView(5, []buffer.View{{'a', 'b', 'c', 'd'}, {'e'}})},
+ {vv(0), nil, vv(0)},
+ {vv(0), v(""), vv(0)},
+ {vv(4, "abcd"), nil, vv(4, "abcd")},
+ {vv(4, "abcd"), v(""), vv(4, "abcd")},
+ {vv(4, "abcd"), v("e"), vv(5, "abcd", "e")},
}
for _, tc := range testCases {
tc.vv.AppendView(tc.in)
@@ -580,6 +591,31 @@ func TestAppendView(t *testing.T) {
}
}
+func TestAppendViews(t *testing.T) {
+ testCases := []struct {
+ vv buffer.VectorisedView
+ in []buffer.View
+ want buffer.VectorisedView
+ }{
+ {vv(0), nil, vv(0)},
+ {vv(0), []buffer.View{}, vv(0)},
+ {vv(0), []buffer.View{v("")}, vv(0, "")},
+ {vv(4, "abcd"), nil, vv(4, "abcd")},
+ {vv(4, "abcd"), []buffer.View{}, vv(4, "abcd")},
+ {vv(4, "abcd"), []buffer.View{v("")}, vv(4, "abcd", "")},
+ {vv(4, "abcd"), []buffer.View{v("e")}, vv(5, "abcd", "e")},
+ {vv(4, "abcd"), []buffer.View{v("e"), v("fg")}, vv(7, "abcd", "e", "fg")},
+ {vv(4, "abcd"), []buffer.View{v(""), v("fg")}, vv(6, "abcd", "", "fg")},
+ }
+ for _, tc := range testCases {
+ tc.vv.AppendViews(tc.in)
+ if got, want := tc.vv, tc.want; !reflect.DeepEqual(got, want) {
+ t.Errorf("AppendViews(%v) failed: got %+v, want %+v", tc.in, got, want)
+ }
+ }
+}
+
func TestMemSize(t *testing.T) {
const perViewCap = 128
views := make([]buffer.View, 2, 32)
diff --git a/pkg/tcpip/checker/checker.go b/pkg/tcpip/checker/checker.go
index 07b4393a4..fc622b246 100644
--- a/pkg/tcpip/checker/checker.go
+++ b/pkg/tcpip/checker/checker.go
@@ -567,7 +567,7 @@ func TCPWindowLessThanEq(window uint16) TransportChecker {
}
// TCPFlags creates a checker that checks the tcp flags.
-func TCPFlags(flags uint8) TransportChecker {
+func TCPFlags(flags header.TCPFlags) TransportChecker {
return func(t *testing.T, h header.Transport) {
t.Helper()
@@ -576,15 +576,15 @@ func TCPFlags(flags uint8) TransportChecker {
t.Fatalf("TCP header not found in h: %T", h)
}
- if f := tcp.Flags(); f != flags {
- t.Errorf("Bad flags, got 0x%x, want 0x%x", f, flags)
+ if got := tcp.Flags(); got != flags {
+ t.Errorf("got tcp.Flags() = %s, want %s", got, flags)
}
}
}
// TCPFlagsMatch creates a checker that checks that the tcp flags, masked by the
// given mask, match the supplied flags.
-func TCPFlagsMatch(flags, mask uint8) TransportChecker {
+func TCPFlagsMatch(flags, mask header.TCPFlags) TransportChecker {
return func(t *testing.T, h header.Transport) {
t.Helper()
@@ -593,8 +593,8 @@ func TCPFlagsMatch(flags, mask uint8) TransportChecker {
t.Fatalf("TCP header not found in h: %T", h)
}
- if f := tcp.Flags(); (f & mask) != (flags & mask) {
- t.Errorf("Bad masked flags, got 0x%x, want 0x%x, mask 0x%x", f, flags, mask)
+ if got := tcp.Flags(); (got & mask) != (flags & mask) {
+ t.Errorf("got tcp.Flags() = %s, want %s, mask %s", got, flags, mask)
}
}
}
@@ -985,7 +985,11 @@ func ICMPv6(checkers ...TransportChecker) NetworkChecker {
}
icmp := header.ICMPv6(last.Payload())
- if got, want := icmp.Checksum(), header.ICMPv6Checksum(icmp, last.SourceAddress(), last.DestinationAddress(), buffer.VectorisedView{}); got != want {
+ if got, want := icmp.Checksum(), header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: last.SourceAddress(),
+ Dst: last.DestinationAddress(),
+ }); got != want {
t.Fatalf("Bad ICMPv6 checksum; got %d, want %d", got, want)
}
diff --git a/pkg/tcpip/errors.go b/pkg/tcpip/errors.go
index 3b7cc52f3..5d478ac32 100644
--- a/pkg/tcpip/errors.go
+++ b/pkg/tcpip/errors.go
@@ -300,6 +300,19 @@ func (*ErrInvalidOptionValue) IgnoreStats() bool {
}
func (*ErrInvalidOptionValue) String() string { return "invalid option value specified" }
+// ErrInvalidPortRange indicates an attempt to set an invalid port range.
+//
+// +stateify savable
+type ErrInvalidPortRange struct{}
+
+func (*ErrInvalidPortRange) isError() {}
+
+// IgnoreStats implements Error.
+func (*ErrInvalidPortRange) IgnoreStats() bool {
+ return true
+}
+func (*ErrInvalidPortRange) String() string { return "invalid port range" }
+
// ErrMalformedHeader indicates the operation encountered a malformed header.
//
// +stateify savable
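
A rough sketch of the kind of check that would surface this error from a SetPortRange implementation; the validation shown is assumed, not the actual netstack code:

    package main

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/tcpip"
    )

    // setPortRange rejects ranges whose start exceeds the end by returning the
    // new ErrInvalidPortRange, which syserr translates to EINVAL (see the
    // syserr/netstack.go hunk above).
    func setPortRange(start, end uint16) tcpip.Error {
    	if start > end {
    		return &tcpip.ErrInvalidPortRange{}
    	}
    	// A real implementation would record the range here.
    	return nil
    }

    func main() {
    	fmt.Println(setPortRange(60999, 32768)) // invalid port range
    	fmt.Println(setPortRange(32768, 60999)) // <nil>
    }
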
diff --git a/pkg/tcpip/header/checksum.go b/pkg/tcpip/header/checksum.go
index 14a4b2b44..6aa9acfa8 100644
--- a/pkg/tcpip/header/checksum.go
+++ b/pkg/tcpip/header/checksum.go
@@ -186,42 +186,29 @@ func Checksum(buf []byte, initial uint16) uint16 {
//
// The initial checksum must have been computed on an even number of bytes.
func ChecksumVV(vv buffer.VectorisedView, initial uint16) uint16 {
- return ChecksumVVWithOffset(vv, initial, 0, vv.Size())
+ var c Checksumer
+ for _, v := range vv.Views() {
+ c.Add([]byte(v))
+ }
+ return ChecksumCombine(initial, c.Checksum())
}
-// ChecksumVVWithOffset calculates the checksum (as defined in RFC 1071) of the
-// bytes in the given VectorizedView.
-//
-// The initial checksum must have been computed on an even number of bytes.
-func ChecksumVVWithOffset(vv buffer.VectorisedView, initial uint16, off int, size int) uint16 {
- odd := false
- sum := initial
- for _, v := range vv.Views() {
- if len(v) == 0 {
- continue
- }
-
- if off >= len(v) {
- off -= len(v)
- continue
- }
- v = v[off:]
-
- l := len(v)
- if l > size {
- l = size
- }
- v = v[:l]
-
- sum, odd = unrolledCalculateChecksum(v, odd, uint32(sum))
-
- size -= len(v)
- if size == 0 {
- break
- }
- off = 0
+// Checksumer calculates the checksum (as defined in RFC 1071) over the data added to it.
+type Checksumer struct {
+ sum uint16
+ odd bool
+}
+
+// Add adds b to the running checksum.
+func (c *Checksumer) Add(b []byte) {
+ if len(b) > 0 {
+ c.sum, c.odd = unrolledCalculateChecksum(b, c.odd, uint32(c.sum))
}
- return sum
+}
+
+// Checksum returns the latest checksum value.
+func (c *Checksumer) Checksum() uint16 {
+ return c.sum
}
// ChecksumCombine combines the two uint16 to form their checksum. This is done
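
A minimal usage sketch of the new Checksumer, using only the Add/Checksum API added here together with the existing ChecksumCombine; the expected value matches the TwoOddViews case in the updated test below:

    package main

    import (
    	"fmt"

    	"gvisor.dev/gvisor/pkg/tcpip/header"
    )

    func main() {
    	// Feed a payload that arrives as multiple slices, then fold in an
    	// initial checksum, mirroring what ChecksumVV now does internally.
    	var c header.Checksumer
    	c.Add([]byte{1, 9, 0, 5, 4})
    	c.Add([]byte{4, 3, 7, 1, 2, 123})

    	const initial = 0
    	fmt.Println(header.ChecksumCombine(initial, c.Checksum())) // 33819
    }
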
diff --git a/pkg/tcpip/header/checksum_test.go b/pkg/tcpip/header/checksum_test.go
index 5ab20ee86..d267dabd0 100644
--- a/pkg/tcpip/header/checksum_test.go
+++ b/pkg/tcpip/header/checksum_test.go
@@ -17,6 +17,7 @@
package header_test
import (
+ "bytes"
"fmt"
"math/rand"
"sync"
@@ -26,86 +27,72 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/header"
)
-func TestChecksumVVWithOffset(t *testing.T) {
+func TestChecksumer(t *testing.T) {
testCases := []struct {
- name string
- vv buffer.VectorisedView
- off, size int
- initial uint16
- want uint16
+ name string
+ data [][]byte
+ want uint16
}{
{
name: "empty",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{1, 9, 0, 5, 4}),
- }),
- off: 0,
- size: 0,
want: 0,
},
{
- name: "OneView",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{1, 9, 0, 5, 4}),
- }),
- off: 0,
- size: 5,
+ name: "OneOddView",
+ data: [][]byte{
+ []byte{1, 9, 0, 5, 4},
+ },
want: 1294,
},
{
- name: "TwoViews",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{1, 9, 0, 5, 4}),
- buffer.NewViewFromBytes([]byte{4, 3, 7, 1, 2, 123}),
- }),
- off: 0,
- size: 11,
+ name: "TwoOddViews",
+ data: [][]byte{
+ []byte{1, 9, 0, 5, 4},
+ []byte{4, 3, 7, 1, 2, 123},
+ },
want: 33819,
},
{
- name: "TwoViewsWithOffset",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{98, 1, 9, 0, 5, 4}),
- buffer.NewViewFromBytes([]byte{4, 3, 7, 1, 2, 123}),
- }),
- off: 1,
- size: 11,
- want: 33819,
+ name: "OneEvenView",
+ data: [][]byte{
+ []byte{1, 9, 0, 5},
+ },
+ want: 270,
},
{
- name: "ThreeViewsWithOffset",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{98, 1, 9, 0, 5, 4}),
- buffer.NewViewFromBytes([]byte{98, 1, 9, 0, 5, 4}),
- buffer.NewViewFromBytes([]byte{4, 3, 7, 1, 2, 123}),
- }),
- off: 7,
- size: 11,
- want: 33819,
+ name: "TwoEvenViews",
+ data: [][]byte{
+ []byte{98, 1, 9, 0},
+ []byte{9, 0, 5, 4},
+ },
+ want: 30981,
},
{
- name: "ThreeViewsWithInitial",
- vv: buffer.NewVectorisedView(0, []buffer.View{
- buffer.NewViewFromBytes([]byte{77, 11, 33, 0, 55, 44}),
- buffer.NewViewFromBytes([]byte{98, 1, 9, 0, 5, 4}),
- buffer.NewViewFromBytes([]byte{4, 3, 7, 1, 2, 123, 99}),
- }),
- initial: 77,
- off: 7,
- size: 11,
- want: 33896,
+ name: "ThreeViews",
+ data: [][]byte{
+ []byte{77, 11, 33, 0, 55, 44},
+ []byte{98, 1, 9, 0, 5, 4},
+ []byte{4, 3, 7, 1, 2, 123, 99},
+ },
+ want: 34236,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
- if got, want := header.ChecksumVVWithOffset(tc.vv, tc.initial, tc.off, tc.size), tc.want; got != want {
- t.Errorf("header.ChecksumVVWithOffset(%v) = %v, want: %v", tc, got, tc.want)
+ var all bytes.Buffer
+ var c header.Checksumer
+ for _, b := range tc.data {
+ c.Add(b)
+ // Append to the buffer. We will check the checksum as a whole later.
+ if _, err := all.Write(b); err != nil {
+ t.Fatalf("all.Write(b) = _, %s; want _, nil", err)
+ }
+ }
+ if got, want := c.Checksum(), tc.want; got != want {
+ t.Errorf("c.Checksum() = %d, want %d", got, want)
}
- v := tc.vv.ToView()
- v.TrimFront(tc.off)
- v.CapLength(tc.size)
- if got, want := header.Checksum(v, tc.initial), tc.want; got != want {
- t.Errorf("header.Checksum(%v) = %v, want: %v", tc, got, tc.want)
+ if got, want := header.Checksum(all.Bytes(), 0 /* initial */), tc.want; got != want {
+ t.Errorf("Checksum(flatten tc.data) = %d, want %d", got, want)
}
})
}
@@ -228,7 +215,7 @@ func TestICMPv4Checksum(t *testing.T) {
h.SetChecksum(want)
testICMPChecksum(t, h.Checksum, func() uint16 {
- return header.ICMPv4Checksum(h, vv)
+ return header.ICMPv4Checksum(h, header.ChecksumVV(vv, 0))
}, want, fmt.Sprintf("header: {% x} data {% x}", h, vv.ToView()))
}
@@ -260,6 +247,12 @@ func TestICMPv6Checksum(t *testing.T) {
h.SetChecksum(want)
testICMPChecksum(t, h.Checksum, func() uint16 {
- return header.ICMPv6Checksum(h, src, dst, vv)
+ return header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: h,
+ Src: src,
+ Dst: dst,
+ PayloadCsum: header.ChecksumVV(vv, 0),
+ PayloadLen: vv.Size(),
+ })
}, want, fmt.Sprintf("header: {% x} data {% x}", h, vv.ToView()))
}
diff --git a/pkg/tcpip/header/icmpv4.go b/pkg/tcpip/header/icmpv4.go
index f840a4322..91c1c3cd2 100644
--- a/pkg/tcpip/header/icmpv4.go
+++ b/pkg/tcpip/header/icmpv4.go
@@ -18,7 +18,6 @@ import (
"encoding/binary"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
)
// ICMPv4 represents an ICMPv4 header stored in a byte array.
@@ -198,8 +197,8 @@ func (b ICMPv4) SetSequence(sequence uint16) {
// ICMPv4Checksum calculates the ICMP checksum over the provided ICMP header,
// and payload.
-func ICMPv4Checksum(h ICMPv4, vv buffer.VectorisedView) uint16 {
- xsum := ChecksumVV(vv, 0)
+func ICMPv4Checksum(h ICMPv4, payloadCsum uint16) uint16 {
+ xsum := payloadCsum
// h[2:4] is the checksum itself, skip it to avoid checksumming the checksum.
xsum = Checksum(h[:2], xsum)
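ICMPv4Checksum now takes a precomputed payload checksum rather than a VectorisedView. A hedged sketch of the new calling convention (h and payload are assumed inputs, not part of this change):

package sketch

import (
	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/header"
)

// setICMPv4Checksum shows the new calling convention: the caller computes the
// payload checksum and hands it to ICMPv4Checksum.
func setICMPv4Checksum(h header.ICMPv4, payload buffer.VectorisedView) {
	payloadCsum := header.ChecksumVV(payload, 0 /* initial */)
	h.SetChecksum(header.ICMPv4Checksum(h, payloadCsum))
}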
diff --git a/pkg/tcpip/header/icmpv6.go b/pkg/tcpip/header/icmpv6.go
index eca9750ab..668da623a 100644
--- a/pkg/tcpip/header/icmpv6.go
+++ b/pkg/tcpip/header/icmpv6.go
@@ -18,7 +18,6 @@ import (
"encoding/binary"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
)
// ICMPv6 represents an ICMPv6 header stored in a byte array.
@@ -262,12 +261,22 @@ func (b ICMPv6) Payload() []byte {
return b[ICMPv6PayloadOffset:]
}
+// ICMPv6ChecksumParams contains the parameters to calculate the ICMPv6 checksum.
+type ICMPv6ChecksumParams struct {
+ Header ICMPv6
+ Src tcpip.Address
+ Dst tcpip.Address
+ PayloadCsum uint16
+ PayloadLen int
+}
+
// ICMPv6Checksum calculates the ICMP checksum over the provided ICMPv6 header,
// IPv6 src/dst addresses and the payload.
-func ICMPv6Checksum(h ICMPv6, src, dst tcpip.Address, vv buffer.VectorisedView) uint16 {
- xsum := PseudoHeaderChecksum(ICMPv6ProtocolNumber, src, dst, uint16(len(h)+vv.Size()))
+func ICMPv6Checksum(params ICMPv6ChecksumParams) uint16 {
+ h := params.Header
- xsum = ChecksumVV(vv, xsum)
+ xsum := PseudoHeaderChecksum(ICMPv6ProtocolNumber, params.Src, params.Dst, uint16(len(h)+params.PayloadLen))
+ xsum = ChecksumCombine(xsum, params.PayloadCsum)
// h[2:4] is the checksum itself, skip it to avoid checksumming the checksum.
xsum = Checksum(h[:2], xsum)
diff --git a/pkg/tcpip/header/parse/parse.go b/pkg/tcpip/header/parse/parse.go
index 2042f214a..ebb4b2c1d 100644
--- a/pkg/tcpip/header/parse/parse.go
+++ b/pkg/tcpip/header/parse/parse.go
@@ -41,7 +41,7 @@ func ARP(pkt *stack.PacketBuffer) bool {
//
// Returns true if the header was successfully parsed.
func IPv4(pkt *stack.PacketBuffer) bool {
- hdr, ok := pkt.Data.PullUp(header.IPv4MinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
if !ok {
return false
}
@@ -62,27 +62,29 @@ func IPv4(pkt *stack.PacketBuffer) bool {
ipHdr = header.IPv4(hdr)
pkt.NetworkProtocolNumber = header.IPv4ProtocolNumber
- pkt.Data.CapLength(int(ipHdr.TotalLength()) - len(hdr))
+ pkt.Data().CapLength(int(ipHdr.TotalLength()) - len(hdr))
return true
}
// IPv6 parses an IPv6 packet found in pkt.Data and populates pkt's network
// header with the IPv6 header.
func IPv6(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) {
- hdr, ok := pkt.Data.PullUp(header.IPv6MinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
return 0, 0, 0, false, false
}
ipHdr := header.IPv6(hdr)
- // dataClone consists of:
+ // Create a VV to parse the packet. We don't plan to modify anything here.
+ // dataVV consists of:
// - Any IPv6 header bytes after the first 40 (i.e. extensions).
// - The transport header, if present.
// - Any other payload data.
views := [8]buffer.View{}
- dataClone := pkt.Data.Clone(views[:])
- dataClone.TrimFront(header.IPv6MinimumSize)
- it := header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(ipHdr.NextHeader()), dataClone)
+ dataVV := buffer.NewVectorisedView(0, views[:0])
+ dataVV.AppendViews(pkt.Data().Views())
+ dataVV.TrimFront(header.IPv6MinimumSize)
+ it := header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(ipHdr.NextHeader()), dataVV)
// Iterate over the IPv6 extensions to find their length.
var nextHdr tcpip.TransportProtocolNumber
@@ -98,7 +100,7 @@ traverseExtensions:
// If we exhaust the extension list, the entire packet is the IPv6 header
// and (possibly) extensions.
if done {
- extensionsSize = dataClone.Size()
+ extensionsSize = dataVV.Size()
break
}
@@ -110,12 +112,12 @@ traverseExtensions:
fragMore = extHdr.More()
}
rawPayload := it.AsRawHeader(true /* consume */)
- extensionsSize = dataClone.Size() - rawPayload.Buf.Size()
+ extensionsSize = dataVV.Size() - rawPayload.Buf.Size()
break traverseExtensions
case header.IPv6RawPayloadHeader:
// We've found the payload after any extensions.
- extensionsSize = dataClone.Size() - extHdr.Buf.Size()
+ extensionsSize = dataVV.Size() - extHdr.Buf.Size()
nextHdr = tcpip.TransportProtocolNumber(extHdr.Identifier)
break traverseExtensions
@@ -127,10 +129,10 @@ traverseExtensions:
// Put the IPv6 header with extensions in pkt.NetworkHeader().
hdr, ok = pkt.NetworkHeader().Consume(header.IPv6MinimumSize + extensionsSize)
if !ok {
- panic(fmt.Sprintf("pkt.Data should have at least %d bytes, but only has %d.", header.IPv6MinimumSize+extensionsSize, pkt.Data.Size()))
+ panic(fmt.Sprintf("pkt.Data should have at least %d bytes, but only has %d.", header.IPv6MinimumSize+extensionsSize, pkt.Data().Size()))
}
ipHdr = header.IPv6(hdr)
- pkt.Data.CapLength(int(ipHdr.PayloadLength()))
+ pkt.Data().CapLength(int(ipHdr.PayloadLength()))
pkt.NetworkProtocolNumber = header.IPv6ProtocolNumber
return nextHdr, fragID, fragOffset, fragMore, true
@@ -153,13 +155,13 @@ func UDP(pkt *stack.PacketBuffer) bool {
func TCP(pkt *stack.PacketBuffer) bool {
// TCP header is variable length, peek at it first.
hdrLen := header.TCPMinimumSize
- hdr, ok := pkt.Data.PullUp(hdrLen)
+ hdr, ok := pkt.Data().PullUp(hdrLen)
if !ok {
return false
}
// If the header has options, pull those up as well.
- if offset := int(header.TCP(hdr).DataOffset()); offset > header.TCPMinimumSize && offset <= pkt.Data.Size() {
+ if offset := int(header.TCP(hdr).DataOffset()); offset > header.TCPMinimumSize && offset <= pkt.Data().Size() {
// TODO(gvisor.dev/issue/2404): Figure out whether to reject this kind of
// packets.
hdrLen = offset
diff --git a/pkg/tcpip/header/tcp.go b/pkg/tcpip/header/tcp.go
index 4c6f808e5..adc835d30 100644
--- a/pkg/tcpip/header/tcp.go
+++ b/pkg/tcpip/header/tcp.go
@@ -45,9 +45,23 @@ const (
TCPMaxSACKBlocks = 4
)
+// TCPFlags is the dedicated type for TCP flags.
+type TCPFlags uint8
+
+// String implements Stringer.String.
+func (f TCPFlags) String() string {
+ flagsStr := []byte("FSRPAU")
+ for i := range flagsStr {
+ if f&(1<<uint(i)) == 0 {
+ flagsStr[i] = ' '
+ }
+ }
+ return string(flagsStr)
+}
+
// Flags that may be set in a TCP segment.
const (
- TCPFlagFin = 1 << iota
+ TCPFlagFin TCPFlags = 1 << iota
TCPFlagSyn
TCPFlagRst
TCPFlagPsh
@@ -94,7 +108,7 @@ type TCPFields struct {
DataOffset uint8
// Flags is the "flags" field of a TCP packet.
- Flags uint8
+ Flags TCPFlags
// WindowSize is the "window size" field of a TCP packet.
WindowSize uint16
@@ -234,8 +248,8 @@ func (b TCP) Payload() []byte {
}
// Flags returns the flags field of the tcp header.
-func (b TCP) Flags() uint8 {
- return b[TCPFlagsOffset]
+func (b TCP) Flags() TCPFlags {
+ return TCPFlags(b[TCPFlagsOffset])
}
// WindowSize returns the "window size" field of the tcp header.
@@ -319,10 +333,10 @@ func (b TCP) ParsedOptions() TCPOptions {
return ParseTCPOptions(b.Options())
}
-func (b TCP) encodeSubset(seq, ack uint32, flags uint8, rcvwnd uint16) {
+func (b TCP) encodeSubset(seq, ack uint32, flags TCPFlags, rcvwnd uint16) {
binary.BigEndian.PutUint32(b[TCPSeqNumOffset:], seq)
binary.BigEndian.PutUint32(b[TCPAckNumOffset:], ack)
- b[TCPFlagsOffset] = flags
+ b[TCPFlagsOffset] = uint8(flags)
binary.BigEndian.PutUint16(b[TCPWinSizeOffset:], rcvwnd)
}
@@ -338,7 +352,7 @@ func (b TCP) Encode(t *TCPFields) {
// EncodePartial updates a subset of the fields of the tcp header. It is useful
// in cases when similar segments are produced.
-func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32, flags byte, rcvwnd uint16) {
+func (b TCP) EncodePartial(partialChecksum, length uint16, seqnum, acknum uint32, flags TCPFlags, rcvwnd uint16) {
// Add the total length and "flags" field contributions to the checksum.
// We don't use the flags field directly from the header because it's a
// one-byte field with an odd offset, so it would be accounted for
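With flags now carried as the dedicated TCPFlags type, call sites compose them with bitwise OR and the value flows through TCPFields and Encode unchanged; a brief sketch (field values are illustrative, not part of this change):

package main

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/header"
)

func main() {
	// Compose flags with bitwise OR; the Stringer renders a fixed-width
	// "FSRPAU" mask with unset positions blanked.
	flags := header.TCPFlagSyn | header.TCPFlagAck
	fmt.Printf("flags: %s\n", flags)

	// The typed field flows through TCPFields and Encode unchanged.
	b := header.TCP(make([]byte, header.TCPMinimumSize))
	b.Encode(&header.TCPFields{
		SrcPort:    1234, // illustrative values
		DstPort:    80,
		SeqNum:     1,
		AckNum:     1,
		DataOffset: header.TCPMinimumSize,
		Flags:      flags,
		WindowSize: 65535,
	})
	fmt.Println(b.Flags() == flags)
}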
diff --git a/pkg/tcpip/header/tcp_test.go b/pkg/tcpip/header/tcp_test.go
index 72563837b..96db8460f 100644
--- a/pkg/tcpip/header/tcp_test.go
+++ b/pkg/tcpip/header/tcp_test.go
@@ -146,3 +146,23 @@ func TestTCPParseOptions(t *testing.T) {
}
}
}
+
+func TestTCPFlags(t *testing.T) {
+ for _, tt := range []struct {
+ flags header.TCPFlags
+ want string
+ }{
+ {header.TCPFlagFin, "F     "},
+ {header.TCPFlagSyn, " S    "},
+ {header.TCPFlagRst, "  R   "},
+ {header.TCPFlagPsh, "   P  "},
+ {header.TCPFlagAck, "    A "},
+ {header.TCPFlagUrg, "     U"},
+ {header.TCPFlagSyn | header.TCPFlagAck, " S  A "},
+ {header.TCPFlagFin | header.TCPFlagAck, "F   A "},
+ } {
+ if got := tt.flags.String(); got != tt.want {
+ t.Errorf("got TCPFlags(%#b).String() = %s, want = %s", tt.flags, got, tt.want)
+ }
+ }
+}
diff --git a/pkg/tcpip/link/fdbased/endpoint.go b/pkg/tcpip/link/fdbased/endpoint.go
index 72d3f70ac..e17e2085c 100644
--- a/pkg/tcpip/link/fdbased/endpoint.go
+++ b/pkg/tcpip/link/fdbased/endpoint.go
@@ -427,7 +427,7 @@ func (e *endpoint) WritePacket(r stack.RouteInfo, gso *stack.GSO, protocol tcpip
vnetHdr.csumStart = header.EthernetMinimumSize + gso.L3HdrLen
vnetHdr.csumOffset = gso.CsumOffset
}
- if gso.Type != stack.GSONone && uint16(pkt.Data.Size()) > gso.MSS {
+ if gso.Type != stack.GSONone && uint16(pkt.Data().Size()) > gso.MSS {
switch gso.Type {
case stack.GSOTCPv4:
vnetHdr.gsoType = _VIRTIO_NET_HDR_GSO_TCPV4
@@ -468,7 +468,7 @@ func (e *endpoint) sendBatch(batchFD int, batch []*stack.PacketBuffer) (int, tcp
vnetHdr.csumStart = header.EthernetMinimumSize + pkt.GSOOptions.L3HdrLen
vnetHdr.csumOffset = pkt.GSOOptions.CsumOffset
}
- if pkt.GSOOptions.Type != stack.GSONone && uint16(pkt.Data.Size()) > pkt.GSOOptions.MSS {
+ if pkt.GSOOptions.Type != stack.GSONone && uint16(pkt.Data().Size()) > pkt.GSOOptions.MSS {
switch pkt.GSOOptions.Type {
case stack.GSOTCPv4:
vnetHdr.gsoType = _VIRTIO_NET_HDR_GSO_TCPV4
diff --git a/pkg/tcpip/link/fdbased/endpoint_test.go b/pkg/tcpip/link/fdbased/endpoint_test.go
index 358a030d2..1e40f3fef 100644
--- a/pkg/tcpip/link/fdbased/endpoint_test.go
+++ b/pkg/tcpip/link/fdbased/endpoint_test.go
@@ -67,7 +67,7 @@ func checkPacketInfoEqual(t *testing.T, got, want packetInfo) {
LinkHeader: pk.LinkHeader().View(),
NetworkHeader: pk.NetworkHeader().View(),
TransportHeader: pk.TransportHeader().View(),
- Data: pk.Data.ToView(),
+ Data: pk.Data().AsRange().ToOwnedView(),
}
}),
); diff != "" {
@@ -616,8 +616,8 @@ func TestDispatchPacketFormat(t *testing.T) {
if got, want := pkt.LinkHeader().View().Size(), header.EthernetMinimumSize; got != want {
t.Errorf("pkt.LinkHeader().View().Size() = %d, want %d", got, want)
}
- if got, want := pkt.Data.Size(), 4; got != want {
- t.Errorf("pkt.Data.Size() = %d, want %d", got, want)
+ if got, want := pkt.Data().Size(), 4; got != want {
+ t.Errorf("pkt.Data().Size() = %d, want %d", got, want)
}
})
}
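Throughout this change the pkt.Data field becomes the pkt.Data() accessor, and flattening, capping, and checksumming go through AsRange(); a hedged sketch of the common patterns (the helper below is illustrative, not part of the change):

package sketch

import (
	"gvisor.dev/gvisor/pkg/tcpip/buffer"
	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// payloadSummary illustrates the accessor patterns used in this change.
func payloadSummary(pkt *stack.PacketBuffer) (int, buffer.View, uint16) {
	// Peek at the first byte without consuming it.
	if _, ok := pkt.Data().PullUp(1); !ok {
		return 0, nil, 0
	}
	size := pkt.Data().Size()
	flat := pkt.Data().AsRange().ToOwnedView() // copy the payload into a flat view
	xsum := pkt.Data().AsRange().Checksum()    // RFC 1071 checksum of the payload
	return size, flat, xsum
}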
diff --git a/pkg/tcpip/link/fdbased/packet_dispatchers.go b/pkg/tcpip/link/fdbased/packet_dispatchers.go
index 736871d1c..46df87f44 100644
--- a/pkg/tcpip/link/fdbased/packet_dispatchers.go
+++ b/pkg/tcpip/link/fdbased/packet_dispatchers.go
@@ -165,7 +165,7 @@ func (d *readVDispatcher) dispatch() (bool, tcpip.Error) {
// We don't get any indication of what the packet is, so try to guess
// if it's an IPv4 or IPv6 packet.
// IP version information is at the first octet, so pulling up 1 byte.
- h, ok := pkt.Data.PullUp(1)
+ h, ok := pkt.Data().PullUp(1)
if !ok {
return true, nil
}
@@ -270,7 +270,7 @@ func (d *recvMMsgDispatcher) dispatch() (bool, tcpip.Error) {
// We don't get any indication of what the packet is, so try to guess
// if it's an IPv4 or IPv6 packet.
// IP version information is at the first octet, so pulling up 1 byte.
- h, ok := pkt.Data.PullUp(1)
+ h, ok := pkt.Data().PullUp(1)
if !ok {
// Skip this packet.
continue
diff --git a/pkg/tcpip/link/sharedmem/sharedmem_test.go b/pkg/tcpip/link/sharedmem/sharedmem_test.go
index def47772f..d4b3ddd5c 100644
--- a/pkg/tcpip/link/sharedmem/sharedmem_test.go
+++ b/pkg/tcpip/link/sharedmem/sharedmem_test.go
@@ -80,7 +80,7 @@ func (q *queueBuffers) cleanup() {
type packetInfo struct {
addr tcpip.LinkAddress
proto tcpip.NetworkProtocolNumber
- vv buffer.VectorisedView
+ data buffer.View
linkHeader buffer.View
}
@@ -136,7 +136,7 @@ func (c *testContext) DeliverNetworkPacket(remoteLinkAddr, localLinkAddr tcpip.L
c.packets = append(c.packets, packetInfo{
addr: remoteLinkAddr,
proto: proto,
- vv: pkt.Data.Clone(nil),
+ data: pkt.Data().AsRange().ToOwnedView(),
})
c.mu.Unlock()
@@ -676,7 +676,7 @@ func TestSimpleReceive(t *testing.T) {
// Wait for packet to be received, then check it.
c.waitForPackets(1, time.After(5*time.Second), "Timeout waiting for packet")
c.mu.Lock()
- rcvd := []byte(c.packets[0].vv.ToView())
+ rcvd := []byte(c.packets[0].data)
c.packets = c.packets[:0]
c.mu.Unlock()
diff --git a/pkg/tcpip/link/sniffer/sniffer.go b/pkg/tcpip/link/sniffer/sniffer.go
index bd2b8d4bf..7aaee3d13 100644
--- a/pkg/tcpip/link/sniffer/sniffer.go
+++ b/pkg/tcpip/link/sniffer/sniffer.go
@@ -290,7 +290,7 @@ func logPacket(prefix string, dir direction, protocol tcpip.NetworkProtocolNumbe
switch tcpip.TransportProtocolNumber(transProto) {
case header.ICMPv4ProtocolNumber:
transName = "icmp"
- hdr, ok := pkt.Data.PullUp(header.ICMPv4MinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.ICMPv4MinimumSize)
if !ok {
break
}
@@ -327,7 +327,7 @@ func logPacket(prefix string, dir direction, protocol tcpip.NetworkProtocolNumbe
case header.ICMPv6ProtocolNumber:
transName = "icmp"
- hdr, ok := pkt.Data.PullUp(header.ICMPv6MinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.ICMPv6MinimumSize)
if !ok {
break
}
@@ -387,7 +387,7 @@ func logPacket(prefix string, dir direction, protocol tcpip.NetworkProtocolNumbe
details += fmt.Sprintf("invalid packet: tcp data offset too small %d", offset)
break
}
- if size := pkt.Data.Size() + len(tcp); offset > size && !moreFragments {
+ if size := pkt.Data().Size() + len(tcp); offset > size && !moreFragments {
details += fmt.Sprintf("invalid packet: tcp data offset %d larger than tcp packet length %d", offset, size)
break
}
@@ -398,13 +398,7 @@ func logPacket(prefix string, dir direction, protocol tcpip.NetworkProtocolNumbe
// Initialize the TCP flags.
flags := tcp.Flags()
- flagsStr := []byte("FSRPAU")
- for i := range flagsStr {
- if flags&(1<<uint(i)) == 0 {
- flagsStr[i] = ' '
- }
- }
- details = fmt.Sprintf("flags:0x%02x (%s) seqnum: %d ack: %d win: %d xsum:0x%x", flags, string(flagsStr), tcp.SequenceNumber(), tcp.AckNumber(), tcp.WindowSize(), tcp.Checksum())
+ details = fmt.Sprintf("flags: %s seqnum: %d ack: %d win: %d xsum:0x%x", flags, tcp.SequenceNumber(), tcp.AckNumber(), tcp.WindowSize(), tcp.Checksum())
if flags&header.TCPFlagSyn != 0 {
details += fmt.Sprintf(" options: %+v", header.ParseSynOptions(tcp.Options(), flags&header.TCPFlagAck != 0))
} else {
diff --git a/pkg/tcpip/link/tun/device.go b/pkg/tcpip/link/tun/device.go
index 3829ca9c9..c1678c4f4 100644
--- a/pkg/tcpip/link/tun/device.go
+++ b/pkg/tcpip/link/tun/device.go
@@ -281,7 +281,7 @@ func (d *Device) encodePkt(info *channel.PacketInfo) (buffer.View, bool) {
vv.AppendView(info.Pkt.NetworkHeader().View())
vv.AppendView(info.Pkt.TransportHeader().View())
// Append data payload.
- vv.Append(info.Pkt.Data)
+ vv.Append(info.Pkt.Data().ExtractVV())
return vv.ToView(), true
}
diff --git a/pkg/tcpip/network/arp/arp.go b/pkg/tcpip/network/arp/arp.go
index 3fcdea119..ae0461a6d 100644
--- a/pkg/tcpip/network/arp/arp.go
+++ b/pkg/tcpip/network/arp/arp.go
@@ -232,7 +232,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
linkAddr := tcpip.LinkAddress(h.HardwareAddressSender())
e.mu.Lock()
- e.mu.dad.StopLocked(addr, false /* aborted */)
+ e.mu.dad.StopLocked(addr, &stack.DADDupAddrDetected{HolderLinkAddress: linkAddr})
e.mu.Unlock()
// The solicited, override, and isRouter flags are not available for ARP;
diff --git a/pkg/tcpip/network/internal/fragmentation/fragmentation.go b/pkg/tcpip/network/internal/fragmentation/fragmentation.go
index 243738951..5168f5361 100644
--- a/pkg/tcpip/network/internal/fragmentation/fragmentation.go
+++ b/pkg/tcpip/network/internal/fragmentation/fragmentation.go
@@ -170,7 +170,7 @@ func (f *Fragmentation) Process(
return nil, 0, false, fmt.Errorf("fragment size=%d bytes is not a multiple of block size=%d on non-final fragment: %w", fragmentSize, f.blockSize, ErrInvalidArgs)
}
- if l := pkt.Data.Size(); l != int(fragmentSize) {
+ if l := pkt.Data().Size(); l != int(fragmentSize) {
return nil, 0, false, fmt.Errorf("got fragment size=%d bytes not equal to the expected fragment size=%d bytes (first=%d last=%d): %w", l, fragmentSize, first, last, ErrInvalidArgs)
}
@@ -293,7 +293,7 @@ func MakePacketFragmenter(pkt *stack.PacketBuffer, fragmentPayloadLen uint32, re
// these headers.
var fragmentableData buffer.VectorisedView
fragmentableData.AppendView(pkt.TransportHeader().View())
- fragmentableData.Append(pkt.Data)
+ fragmentableData.Append(pkt.Data().ExtractVV())
fragmentCount := (uint32(fragmentableData.Size()) + fragmentPayloadLen - 1) / fragmentPayloadLen
return PacketFragmenter{
@@ -323,7 +323,7 @@ func (pf *PacketFragmenter) BuildNextFragment() (*stack.PacketBuffer, int, int,
})
// Copy data for the fragment.
- copied := pf.data.ReadToVV(&fragPkt.Data, pf.fragmentPayloadLen)
+ copied := fragPkt.Data().ReadFromVV(&pf.data, pf.fragmentPayloadLen)
offset := pf.fragmentOffset
pf.fragmentOffset += copied
diff --git a/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go b/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go
index 47ea3173e..7daf64b4a 100644
--- a/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go
+++ b/pkg/tcpip/network/internal/fragmentation/fragmentation_test.go
@@ -121,7 +121,7 @@ func TestFragmentationProcess(t *testing.T) {
in.id, in.first, in.last, in.more, in.proto, done, c.out[i].done)
}
if c.out[i].done {
- if diff := cmp.Diff(c.out[i].vv.ToOwnedView(), resPkt.Data.ToOwnedView()); diff != "" {
+ if diff := cmp.Diff(c.out[i].vv.ToOwnedView(), resPkt.Data().AsRange().ToOwnedView()); diff != "" {
t.Errorf("got Process(%+v, %d, %d, %t, %d, %#v) result mismatch (-want, +got):\n%s",
in.id, in.first, in.last, in.more, in.proto, in.pkt, diff)
}
@@ -470,9 +470,7 @@ func TestPacketFragmenter(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
pkt := testutil.MakeRandPkt(test.transportHeaderLen, reserve, []int{test.payloadSize}, proto)
- var originalPayload buffer.VectorisedView
- originalPayload.AppendView(pkt.TransportHeader().View())
- originalPayload.Append(pkt.Data)
+ originalPayload := stack.PayloadSince(pkt.TransportHeader())
var reassembledPayload buffer.VectorisedView
pf := MakePacketFragmenter(pkt, test.fragmentPayloadLen, reserve)
for i := 0; ; i++ {
@@ -499,7 +497,7 @@ func TestPacketFragmenter(t *testing.T) {
if got := fragPkt.TransportHeader().View().Size(); got != 0 {
t.Errorf("(fragment #%d) got fragPkt.TransportHeader().View().Size() = %d, want = 0", i, got)
}
- reassembledPayload.Append(fragPkt.Data)
+ reassembledPayload.AppendViews(fragPkt.Data().Views())
if !more {
if i != len(test.wantFragments)-1 {
t.Errorf("got fragment count = %d, want = %d", i, len(test.wantFragments)-1)
@@ -507,7 +505,7 @@ func TestPacketFragmenter(t *testing.T) {
break
}
}
- if diff := cmp.Diff(reassembledPayload.ToView(), originalPayload.ToView()); diff != "" {
+ if diff := cmp.Diff(reassembledPayload.ToView(), originalPayload); diff != "" {
t.Errorf("reassembledPayload mismatch (-want +got):\n%s", diff)
}
})
@@ -625,11 +623,11 @@ func TestTimeoutHandler(t *testing.T) {
}
switch {
case handler.pkt != nil && test.wantPkt == nil:
- t.Errorf("got handler.pkt = not nil (pkt.Data = %x), want = nil", handler.pkt.Data.ToView())
+ t.Errorf("got handler.pkt = not nil (pkt.Data = %x), want = nil", handler.pkt.Data().AsRange().ToOwnedView())
case handler.pkt == nil && test.wantPkt != nil:
- t.Errorf("got handler.pkt = nil, want = not nil (pkt.Data = %x)", test.wantPkt.Data.ToView())
+ t.Errorf("got handler.pkt = nil, want = not nil (pkt.Data = %x)", test.wantPkt.Data().AsRange().ToOwnedView())
case handler.pkt != nil && test.wantPkt != nil:
- if diff := cmp.Diff(test.wantPkt.Data.ToView(), handler.pkt.Data.ToView()); diff != "" {
+ if diff := cmp.Diff(test.wantPkt.Data().AsRange().ToOwnedView(), handler.pkt.Data().AsRange().ToOwnedView()); diff != "" {
t.Errorf("pkt.Data mismatch (-want, +got):\n%s", diff)
}
}
diff --git a/pkg/tcpip/network/internal/fragmentation/reassembler.go b/pkg/tcpip/network/internal/fragmentation/reassembler.go
index 933d63d32..90075a70c 100644
--- a/pkg/tcpip/network/internal/fragmentation/reassembler.go
+++ b/pkg/tcpip/network/internal/fragmentation/reassembler.go
@@ -167,8 +167,8 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *s
resPkt := r.holes[0].pkt
for i := 1; i < len(r.holes); i++ {
- fragPkt := r.holes[i].pkt
- fragPkt.Data.ReadToVV(&resPkt.Data, fragPkt.Data.Size())
+ fragData := r.holes[i].pkt.Data()
+ resPkt.Data().ReadFromData(fragData, fragData.Size())
}
return resPkt, r.proto, true, memConsumed, nil
}
diff --git a/pkg/tcpip/network/internal/fragmentation/reassembler_test.go b/pkg/tcpip/network/internal/fragmentation/reassembler_test.go
index 214a93709..cfd9f00ef 100644
--- a/pkg/tcpip/network/internal/fragmentation/reassembler_test.go
+++ b/pkg/tcpip/network/internal/fragmentation/reassembler_test.go
@@ -204,7 +204,7 @@ func TestReassemblerProcess(t *testing.T) {
if a == nil || b == nil {
return a == b
}
- return bytes.Equal(a.Data.ToOwnedView(), b.Data.ToOwnedView())
+ return bytes.Equal(a.Data().AsRange().ToOwnedView(), b.Data().AsRange().ToOwnedView())
}
if isDone {
diff --git a/pkg/tcpip/network/internal/ip/duplicate_address_detection.go b/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
index 6f89a6a16..0053646ee 100644
--- a/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
+++ b/pkg/tcpip/network/internal/ip/duplicate_address_detection.go
@@ -126,9 +126,12 @@ func (d *DAD) CheckDuplicateAddressLocked(addr tcpip.Address, h stack.DADComplet
s.timer.Stop()
delete(d.addresses, addr)
- r := stack.DADResult{Resolved: dadDone, Err: err}
+ var res stack.DADResult = &stack.DADSucceeded{}
+ if err != nil {
+ res = &stack.DADError{Err: err}
+ }
for _, h := range s.completionHandlers {
- h(r)
+ h(res)
}
}),
}
@@ -142,7 +145,7 @@ func (d *DAD) CheckDuplicateAddressLocked(addr tcpip.Address, h stack.DADComplet
// StopLocked stops a currently running DAD process.
//
// Precondition: d.protocolMU must be locked.
-func (d *DAD) StopLocked(addr tcpip.Address, aborted bool) {
+func (d *DAD) StopLocked(addr tcpip.Address, reason stack.DADResult) {
s, ok := d.addresses[addr]
if !ok {
return
@@ -152,14 +155,8 @@ func (d *DAD) StopLocked(addr tcpip.Address, aborted bool) {
s.timer.Stop()
delete(d.addresses, addr)
- var err tcpip.Error
- if aborted {
- err = &tcpip.ErrAborted{}
- }
-
- r := stack.DADResult{Resolved: false, Err: err}
for _, h := range s.completionHandlers {
- h(r)
+ h(reason)
}
}
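StopLocked now receives the DADResult to deliver to completion handlers instead of an aborted flag, and results are reported as concrete types; a hedged sketch of a handler consuming them (the helper below is illustrative, not part of the change):

package sketch

import (
	"fmt"

	"gvisor.dev/gvisor/pkg/tcpip/stack"
)

// describeDADResult distinguishes the concrete result types that completion
// handlers now receive.
func describeDADResult(res stack.DADResult) string {
	switch r := res.(type) {
	case *stack.DADSucceeded:
		return "address is unique"
	case *stack.DADAborted:
		return "DAD was stopped before completing"
	case *stack.DADDupAddrDetected:
		return fmt.Sprintf("duplicate address held by %s", r.HolderLinkAddress)
	case *stack.DADError:
		return fmt.Sprintf("DAD failed: %s", r.Err)
	default:
		return "unknown DAD result"
	}
}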
diff --git a/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go b/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go
index 18c357b56..e00aa4678 100644
--- a/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go
+++ b/pkg/tcpip/network/internal/ip/duplicate_address_detection_test.go
@@ -78,10 +78,10 @@ func (m *mockDADProtocol) checkDuplicateAddress(addr tcpip.Address, h stack.DADC
return m.mu.dad.CheckDuplicateAddressLocked(addr, h)
}
-func (m *mockDADProtocol) stop(addr tcpip.Address, aborted bool) {
+func (m *mockDADProtocol) stop(addr tcpip.Address, reason stack.DADResult) {
m.mu.Lock()
defer m.mu.Unlock()
- m.mu.dad.StopLocked(addr, aborted)
+ m.mu.dad.StopLocked(addr, reason)
}
func (m *mockDADProtocol) setConfigs(c stack.DADConfigurations) {
@@ -175,7 +175,7 @@ func TestDADCheckDuplicateAddress(t *testing.T) {
}
clock.Advance(delta)
for i := 0; i < 2; i++ {
- if diff := cmp.Diff(dadResult{Addr: addr1, R: stack.DADResult{Resolved: true, Err: nil}}, <-ch); diff != "" {
+ if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
t.Errorf("(i=%d) dad result mismatch (-want +got):\n%s", i, diff)
}
}
@@ -189,7 +189,7 @@ func TestDADCheckDuplicateAddress(t *testing.T) {
default:
}
clock.Advance(delta)
- if diff := cmp.Diff(dadResult{Addr: addr2, R: stack.DADResult{Resolved: true, Err: nil}}, <-ch); diff != "" {
+ if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
@@ -202,7 +202,7 @@ func TestDADCheckDuplicateAddress(t *testing.T) {
t.Errorf("dad check mismatch (-want +got):\n%s", diff)
}
clock.Advance(dadConfig2Duration)
- if diff := cmp.Diff(dadResult{Addr: addr2, R: stack.DADResult{Resolved: true, Err: nil}}, <-ch); diff != "" {
+ if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
@@ -241,19 +241,19 @@ func TestDADStop(t *testing.T) {
t.Errorf("dad check mismatch (-want +got):\n%s", diff)
}
- dad.stop(addr1, true /* aborted */)
- if diff := cmp.Diff(dadResult{Addr: addr1, R: stack.DADResult{Resolved: false, Err: &tcpip.ErrAborted{}}}, <-ch); diff != "" {
+ dad.stop(addr1, &stack.DADAborted{})
+ if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADAborted{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
- dad.stop(addr2, false /* aborted */)
- if diff := cmp.Diff(dadResult{Addr: addr2, R: stack.DADResult{Resolved: false, Err: nil}}, <-ch); diff != "" {
+ dad.stop(addr2, &stack.DADDupAddrDetected{})
+ if diff := cmp.Diff(dadResult{Addr: addr2, R: &stack.DADDupAddrDetected{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
dadResolutionDuration := time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer
clock.Advance(dadResolutionDuration)
- if diff := cmp.Diff(dadResult{Addr: addr3, R: stack.DADResult{Resolved: true, Err: nil}}, <-ch); diff != "" {
+ if diff := cmp.Diff(dadResult{Addr: addr3, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
@@ -266,7 +266,7 @@ func TestDADStop(t *testing.T) {
t.Errorf("dad check mismatch (-want +got):\n%s", diff)
}
clock.Advance(dadResolutionDuration)
- if diff := cmp.Diff(dadResult{Addr: addr1, R: stack.DADResult{Resolved: true, Err: nil}}, <-ch); diff != "" {
+ if diff := cmp.Diff(dadResult{Addr: addr1, R: &stack.DADSucceeded{}}, <-ch); diff != "" {
t.Errorf("dad result mismatch (-want +got):\n%s", diff)
}
diff --git a/pkg/tcpip/network/internal/ip/stats.go b/pkg/tcpip/network/internal/ip/stats.go
index 5f7e60c5c..b6f39ddb1 100644
--- a/pkg/tcpip/network/internal/ip/stats.go
+++ b/pkg/tcpip/network/internal/ip/stats.go
@@ -69,8 +69,8 @@ type MultiCounterIPStats struct {
IPTablesOutputDropped tcpip.MultiCounterStat
// TODO(https://gvisor.dev/issues/5529): Move the IPv4-only option stats out
-
// of IPStats.
+
// OptionTimestampReceived is the number of Timestamp options seen.
OptionTimestampReceived tcpip.MultiCounterStat
diff --git a/pkg/tcpip/network/ip_test.go b/pkg/tcpip/network/ip_test.go
index 90236ed9e..aee1652fa 100644
--- a/pkg/tcpip/network/ip_test.go
+++ b/pkg/tcpip/network/ip_test.go
@@ -90,8 +90,7 @@ type testObject struct {
// checkValues verifies that the transport protocol, data contents, src & dst
// addresses of a packet match what's expected. If any field doesn't match, the
// test fails.
-func (t *testObject) checkValues(protocol tcpip.TransportProtocolNumber, vv buffer.VectorisedView, srcAddr, dstAddr tcpip.Address) {
- v := vv.ToView()
+func (t *testObject) checkValues(protocol tcpip.TransportProtocolNumber, v buffer.View, srcAddr, dstAddr tcpip.Address) {
if protocol != t.protocol {
t.t.Errorf("protocol = %v, want %v", protocol, t.protocol)
}
@@ -120,7 +119,7 @@ func (t *testObject) checkValues(protocol tcpip.TransportProtocolNumber, vv buff
// parsing are expected.
func (t *testObject) DeliverTransportPacket(protocol tcpip.TransportProtocolNumber, pkt *stack.PacketBuffer) stack.TransportPacketDisposition {
netHdr := pkt.Network()
- t.checkValues(protocol, pkt.Data, netHdr.SourceAddress(), netHdr.DestinationAddress())
+ t.checkValues(protocol, pkt.Data().AsRange().ToOwnedView(), netHdr.SourceAddress(), netHdr.DestinationAddress())
t.dataCalls++
return stack.TransportPacketHandled
}
@@ -129,7 +128,7 @@ func (t *testObject) DeliverTransportPacket(protocol tcpip.TransportProtocolNumb
// incoming control (ICMP) packets. This is used by the test object to verify
// that the results of the parsing are expected.
func (t *testObject) DeliverTransportError(local, remote tcpip.Address, net tcpip.NetworkProtocolNumber, trans tcpip.TransportProtocolNumber, transErr stack.TransportError, pkt *stack.PacketBuffer) {
- t.checkValues(trans, pkt.Data, remote, local)
+ t.checkValues(trans, pkt.Data().AsRange().ToOwnedView(), remote, local)
if diff := cmp.Diff(
t.transErr,
transportError{
@@ -198,7 +197,7 @@ func (t *testObject) WritePacket(_ *stack.Route, _ *stack.GSO, protocol tcpip.Ne
srcAddr = h.SourceAddress()
dstAddr = h.DestinationAddress()
}
- t.checkValues(prot, pkt.Data, srcAddr, dstAddr)
+ t.checkValues(prot, pkt.Data().AsRange().ToOwnedView(), srcAddr, dstAddr)
return nil
}
@@ -371,7 +370,11 @@ func TestSourceAddressValidation(t *testing.T) {
pkt.SetType(header.ICMPv6EchoRequest)
pkt.SetCode(0)
pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, src, localIPv6Addr, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: src,
+ Dst: localIPv6Addr,
+ }))
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
PayloadLength: header.ICMPv6MinimumSize,
@@ -1199,7 +1202,11 @@ func TestIPv6ReceiveControl(t *testing.T) {
nic.testObject.transErr = c.transErr
// Set ICMPv6 checksum.
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, outerSrcAddr, localIPv6Addr, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: outerSrcAddr,
+ Dst: localIPv6Addr,
+ }))
addressableEndpoint, ok := ep.(stack.AddressableEndpoint)
if !ok {
diff --git a/pkg/tcpip/network/ipv4/BUILD b/pkg/tcpip/network/ipv4/BUILD
index 4b21ee79c..5e7f10f4b 100644
--- a/pkg/tcpip/network/ipv4/BUILD
+++ b/pkg/tcpip/network/ipv4/BUILD
@@ -32,12 +32,14 @@ go_test(
"ipv4_test.go",
],
deps = [
+ "//pkg/sync",
"//pkg/tcpip",
"//pkg/tcpip/buffer",
"//pkg/tcpip/checker",
"//pkg/tcpip/faketime",
"//pkg/tcpip/header",
"//pkg/tcpip/link/channel",
+ "//pkg/tcpip/link/loopback",
"//pkg/tcpip/link/sniffer",
"//pkg/tcpip/network/arp",
"//pkg/tcpip/network/internal/testutil",
diff --git a/pkg/tcpip/network/ipv4/icmp.go b/pkg/tcpip/network/ipv4/icmp.go
index bd0eabad1..deb104837 100644
--- a/pkg/tcpip/network/ipv4/icmp.go
+++ b/pkg/tcpip/network/ipv4/icmp.go
@@ -137,7 +137,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
// is used to find out which transport endpoint must be notified about the ICMP
// packet. We only expect the payload, not the enclosing ICMP packet.
func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.PacketBuffer) {
- h, ok := pkt.Data.PullUp(header.IPv4MinimumSize)
+ h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
if !ok {
return
}
@@ -156,7 +156,7 @@ func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.Packet
}
hlen := int(hdr.HeaderLength())
- if pkt.Data.Size() < hlen || hdr.FragmentOffset() != 0 {
+ if pkt.Data().Size() < hlen || hdr.FragmentOffset() != 0 {
// We won't be able to handle this if it doesn't contain the
// full IPv4 header, or if it's a fragment not at offset 0
// (because it won't have the transport header).
@@ -164,7 +164,7 @@ func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.Packet
}
// Skip the ip header, then deliver the error.
- pkt.Data.TrimFront(hlen)
+ pkt.Data().TrimFront(hlen)
p := hdr.TransportProtocol()
e.dispatcher.DeliverTransportError(srcAddr, hdr.DestinationAddress(), ProtocolNumber, p, errInfo, pkt)
}
@@ -174,7 +174,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
// TODO(gvisor.dev/issue/170): ICMP packets don't have their
// TransportHeader fields set. See icmp/protocol.go:protocol.Parse for a
// full explanation.
- v, ok := pkt.Data.PullUp(header.ICMPv4MinimumSize)
+ v, ok := pkt.Data().PullUp(header.ICMPv4MinimumSize)
if !ok {
received.invalid.Increment()
return
@@ -182,7 +182,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
h := header.ICMPv4(v)
// Only do in-stack processing if the checksum is correct.
- if header.ChecksumVV(pkt.Data, 0 /* initial */) != 0xffff {
+ if pkt.Data().AsRange().Checksum() != 0xffff {
received.invalid.Increment()
// It's possible that a raw socket expects to receive this regardless
// of checksum errors. If it's an echo request we know it's safe because
@@ -238,7 +238,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
// TODO(b/112892170): Meaningfully handle all ICMP types.
switch h.Type() {
case header.ICMPv4Echo:
- received.echo.Increment()
+ received.echoRequest.Increment()
sent := e.stats.icmp.packetsSent
if !e.protocol.stack.AllowICMPMessage() {
@@ -253,7 +253,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
// TODO(gvisor.dev/issue/4399): The copy may not be needed if there are no
// waiting endpoints. Consider moving responsibility for doing the copy to
// DeliverTransportPacket so that it is only done when needed.
- replyData := pkt.Data.ToOwnedView()
+ replyData := pkt.Data().AsRange().ToOwnedView()
ipHdr := header.IPv4(pkt.NetworkHeader().View())
localAddressBroadcast := pkt.NetworkPacketInfo.LocalAddressBroadcast
@@ -336,7 +336,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
case header.ICMPv4DstUnreachable:
received.dstUnreachable.Increment()
- pkt.Data.TrimFront(header.ICMPv4MinimumSize)
+ pkt.Data().TrimFront(header.ICMPv4MinimumSize)
switch h.Code() {
case header.ICMPv4HostUnreachable:
e.handleControl(&icmpv4DestinationHostUnreachableSockError{}, pkt)
@@ -571,7 +571,7 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip
return nil
}
- payloadLen := len(origIPHdr) + transportHeader.Size() + pkt.Data.Size()
+ payloadLen := len(origIPHdr) + transportHeader.Size() + pkt.Data().Size()
if payloadLen > available {
payloadLen = available
}
@@ -586,8 +586,11 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip
newHeader := append(buffer.View(nil), origIPHdr...)
newHeader = append(newHeader, transportHeader...)
payload := newHeader.ToVectorisedView()
- payload.AppendView(pkt.Data.ToView())
- payload.CapLength(payloadLen)
+ if dataCap := payloadLen - payload.Size(); dataCap > 0 {
+ payload.AppendView(pkt.Data().AsRange().Capped(dataCap).ToOwnedView())
+ } else {
+ payload.CapLength(payloadLen)
+ }
icmpPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: int(route.MaxHeaderLength()) + header.ICMPv4MinimumSize,
@@ -623,7 +626,7 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip
default:
panic(fmt.Sprintf("unsupported ICMP type %T", reason))
}
- icmpHdr.SetChecksum(header.ICMPv4Checksum(icmpHdr, icmpPkt.Data))
+ icmpHdr.SetChecksum(header.ICMPv4Checksum(icmpHdr, icmpPkt.Data().AsRange().Checksum()))
if err := route.WritePacket(
nil, /* gso */
diff --git a/pkg/tcpip/network/ipv4/igmp.go b/pkg/tcpip/network/ipv4/igmp.go
index 0a15ae897..f3fc1c87e 100644
--- a/pkg/tcpip/network/ipv4/igmp.go
+++ b/pkg/tcpip/network/ipv4/igmp.go
@@ -197,7 +197,7 @@ func (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType
// Precondition: igmp.ep.mu must be locked.
func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption bool) {
received := igmp.ep.stats.igmp.packetsReceived
- headerView, ok := pkt.Data.PullUp(header.IGMPMinimumSize)
+ headerView, ok := pkt.Data().PullUp(header.IGMPMinimumSize)
if !ok {
received.invalid.Increment()
return
@@ -210,7 +210,7 @@ func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption
// same set of octets, including the checksum field. If the result
// is all 1 bits (-0 in 1's complement arithmetic), the check
// succeeds.
- if header.ChecksumVV(pkt.Data, 0 /* initial */) != 0xFFFF {
+ if pkt.Data().AsRange().Checksum() != 0xFFFF {
received.checksumErrors.Increment()
return
}
diff --git a/pkg/tcpip/network/ipv4/igmp_test.go b/pkg/tcpip/network/ipv4/igmp_test.go
index c5f68e411..e5e1b89cc 100644
--- a/pkg/tcpip/network/ipv4/igmp_test.go
+++ b/pkg/tcpip/network/ipv4/igmp_test.go
@@ -106,9 +106,9 @@ func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType header.IGMPType, ma
igmp.SetGroupAddress(groupAddress)
igmp.SetChecksum(header.IGMPCalculateChecksum(igmp))
- e.InjectInbound(ipv4.ProtocolNumber, &stack.PacketBuffer{
+ e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
Data: buf.ToVectorisedView(),
- })
+ }))
}
// TestIGMPV1Present tests the node's ability to fallback to V1 when a V1
diff --git a/pkg/tcpip/network/ipv4/ipv4.go b/pkg/tcpip/network/ipv4/ipv4.go
index 4a429ea6c..8a2140ebe 100644
--- a/pkg/tcpip/network/ipv4/ipv4.go
+++ b/pkg/tcpip/network/ipv4/ipv4.go
@@ -492,7 +492,7 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
// The packet already has an IP header, but there are a few required
// checks.
- h, ok := pkt.Data.PullUp(header.IPv4MinimumSize)
+ h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
if !ok {
return &tcpip.ErrMalformedHeader{}
}
@@ -502,14 +502,14 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBu
return &tcpip.ErrMalformedHeader{}
}
- h, ok = pkt.Data.PullUp(int(hdrLen))
+ h, ok = pkt.Data().PullUp(int(hdrLen))
if !ok {
return &tcpip.ErrMalformedHeader{}
}
ip := header.IPv4(h)
// Always set the total length.
- pktSize := pkt.Data.Size()
+ pktSize := pkt.Data().Size()
ip.SetTotalLength(uint16(pktSize))
// Set the source address when zero.
@@ -687,7 +687,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
stats := e.stats
h := header.IPv4(pkt.NetworkHeader().View())
- if !h.IsValid(pkt.Data.Size() + pkt.NetworkHeader().View().Size() + pkt.TransportHeader().View().Size()) {
+ if !h.IsValid(pkt.Data().Size() + pkt.NetworkHeader().View().Size() + pkt.TransportHeader().View().Size()) {
stats.ip.MalformedPacketsReceived.Increment()
return
}
@@ -765,7 +765,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
}
if h.More() || h.FragmentOffset() != 0 {
- if pkt.Data.Size()+pkt.TransportHeader().View().Size() == 0 {
+ if pkt.Data().Size()+pkt.TransportHeader().View().Size() == 0 {
// Drop the packet as it's marked as a fragment but has
// no payload.
stats.ip.MalformedPacketsReceived.Increment()
@@ -793,10 +793,10 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// maximum payload size.
//
// Note that this addition doesn't overflow even on 32bit architecture
- // because pkt.Data.Size() should not exceed 65535 (the max IP datagram
+ // because pkt.Data().Size() should not exceed 65535 (the max IP datagram
// size). Otherwise the packet would've been rejected as invalid before
// reaching here.
- if int(start)+pkt.Data.Size() > header.IPv4MaximumPayloadSize {
+ if int(start)+pkt.Data().Size() > header.IPv4MaximumPayloadSize {
stats.ip.MalformedPacketsReceived.Increment()
stats.ip.MalformedFragmentsReceived.Increment()
return
@@ -813,7 +813,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
Protocol: proto,
},
start,
- start+uint16(pkt.Data.Size())-1,
+ start+uint16(pkt.Data().Size())-1,
h.More(),
proto,
pkt,
@@ -831,7 +831,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// The reassembler doesn't take care of fixing up the header, so we need
// to do it here.
- h.SetTotalLength(uint16(pkt.Data.Size() + len((h))))
+ h.SetTotalLength(uint16(pkt.Data().Size() + len((h))))
h.SetFlagsFragmentOffset(0, 0)
}
stats.ip.PacketsDelivered.Increment()
@@ -899,10 +899,9 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// Close cleans up resources associated with the endpoint.
func (e *endpoint) Close() {
e.mu.Lock()
- defer e.mu.Unlock()
-
e.disableLocked()
e.mu.addressableEndpointState.Cleanup()
+ e.mu.Unlock()
e.protocol.forgetEndpoint(e.nic.ID())
}
@@ -1186,7 +1185,7 @@ func calculateNetworkMTU(linkMTU, networkHeaderSize uint32) (uint32, tcpip.Error
}
func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32, gso *stack.GSO) bool {
- payload := pkt.TransportHeader().View().Size() + pkt.Data.Size()
+ payload := pkt.TransportHeader().View().Size() + pkt.Data().Size()
return (gso == nil || gso.Type == stack.GSONone) && uint32(payload) > networkMTU
}
diff --git a/pkg/tcpip/network/ipv4/ipv4_test.go b/pkg/tcpip/network/ipv4/ipv4_test.go
index dc4db6e5f..cfed241bf 100644
--- a/pkg/tcpip/network/ipv4/ipv4_test.go
+++ b/pkg/tcpip/network/ipv4/ipv4_test.go
@@ -26,12 +26,14 @@ import (
"time"
"github.com/google/go-cmp/cmp"
+ "gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/checker"
"gvisor.dev/gvisor/pkg/tcpip/faketime"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
+ "gvisor.dev/gvisor/pkg/tcpip/link/loopback"
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/network/arp"
"gvisor.dev/gvisor/pkg/tcpip/network/internal/testutil"
@@ -1211,7 +1213,7 @@ func compareFragments(packets []*stack.PacketBuffer, sourcePacket *stack.PacketB
sourceCopy.SetFlagsFragmentOffset(sourceCopy.Flags()&^header.IPv4FlagMoreFragments, wantFragments[i].offset)
}
reassembledPayload.AppendView(packet.TransportHeader().View())
- reassembledPayload.Append(packet.Data)
+ reassembledPayload.AppendView(packet.Data().AsRange().ToOwnedView())
// Clear out the checksum and length from the ip because we can't compare
// it.
sourceCopy.SetTotalLength(wantFragments[i].payloadSize + header.IPv4MinimumSize)
@@ -2985,3 +2987,120 @@ func TestPacketQueing(t *testing.T) {
})
}
}
+
+// TestCloseLocking tests that lock ordering is followed when closing an
+// endpoint.
+func TestCloseLocking(t *testing.T) {
+ const (
+ nicID1 = 1
+ nicID2 = 2
+
+ src = tcpip.Address("\x10\x00\x00\x01")
+ dst = tcpip.Address("\x10\x00\x00\x02")
+
+ iterations = 1000
+ )
+
+ s := stack.New(stack.Options{
+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
+ TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
+ })
+
+ // Perform NAT so that the endpoint tries to search for a sibling endpoint
+ // which ends up taking the protocol and endpoint lock (in that order).
+ table := stack.Table{
+ Rules: []stack.Rule{
+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+ {Target: &stack.RedirectTarget{Port: 5, NetworkProtocol: header.IPv4ProtocolNumber}},
+ {Target: &stack.AcceptTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+ {Target: &stack.ErrorTarget{NetworkProtocol: header.IPv4ProtocolNumber}},
+ },
+ BuiltinChains: [stack.NumHooks]int{
+ stack.Prerouting: 0,
+ stack.Input: 1,
+ stack.Forward: stack.HookUnset,
+ stack.Output: 2,
+ stack.Postrouting: 3,
+ },
+ Underflows: [stack.NumHooks]int{
+ stack.Prerouting: 0,
+ stack.Input: 1,
+ stack.Forward: stack.HookUnset,
+ stack.Output: 2,
+ stack.Postrouting: 3,
+ },
+ }
+ if err := s.IPTables().ReplaceTable(stack.NATID, table, false /* ipv6 */); err != nil {
+ t.Fatalf("s.IPTables().ReplaceTable(...): %s", err)
+ }
+
+ e := channel.New(0, defaultMTU, "")
+ if err := s.CreateNIC(nicID1, e); err != nil {
+ t.Fatalf("CreateNIC(%d, _): %s", nicID1, err)
+ }
+
+ if err := s.AddAddress(nicID1, ipv4.ProtocolNumber, src); err != nil {
+ t.Fatalf("AddAddress(%d, %d, %s) failed: %s", nicID1, ipv4.ProtocolNumber, src, err)
+ }
+
+ s.SetRouteTable([]tcpip.Route{{
+ Destination: header.IPv4EmptySubnet,
+ NIC: nicID1,
+ }})
+
+ var wq waiter.Queue
+ ep, err := s.NewEndpoint(udp.ProtocolNumber, ipv4.ProtocolNumber, &wq)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer ep.Close()
+
+ addr := tcpip.FullAddress{NIC: nicID1, Addr: dst, Port: 53}
+ if err := ep.Connect(addr); err != nil {
+ t.Errorf("ep.Connect(%#v): %s", addr, err)
+ }
+
+ var wg sync.WaitGroup
+ defer wg.Wait()
+
+ // Writing packets should trigger NAT which requires the stack to search the
+ // protocol for network endpoints with the destination address.
+ //
+ // Creating and removing interfaces should modify the protocol and endpoint
+ // which requires taking the locks of each.
+ //
+ // We expect the protocol > endpoint lock ordering to be followed here.
+ wg.Add(2)
+ go func() {
+ defer wg.Done()
+
+ data := []byte{1, 2, 3, 4}
+
+ for i := 0; i < iterations; i++ {
+ var r bytes.Reader
+ r.Reset(data)
+ if n, err := ep.Write(&r, tcpip.WriteOptions{}); err != nil {
+ t.Errorf("ep.Write(_, _): %s", err)
+ return
+ } else if want := int64(len(data)); n != want {
+ t.Errorf("got ep.Write(_, _) = (%d, _), want = (%d, _)", n, want)
+ return
+ }
+ }
+ }()
+ go func() {
+ defer wg.Done()
+
+ for i := 0; i < iterations; i++ {
+ if err := s.CreateNIC(nicID2, loopback.New()); err != nil {
+ t.Errorf("CreateNIC(%d, _): %s", nicID2, err)
+ return
+ }
+ if err := s.RemoveNIC(nicID2); err != nil {
+ t.Errorf("RemoveNIC(%d): %s", nicID2, err)
+ return
+ }
+ }
+ }()
+}
diff --git a/pkg/tcpip/network/ipv4/stats.go b/pkg/tcpip/network/ipv4/stats.go
index 5ae73fbfb..5798cfec6 100644
--- a/pkg/tcpip/network/ipv4/stats.go
+++ b/pkg/tcpip/network/ipv4/stats.go
@@ -52,7 +52,7 @@ type sharedStats struct {
// LINT.IfChange(multiCounterICMPv4PacketStats)
type multiCounterICMPv4PacketStats struct {
- echo tcpip.MultiCounterStat
+ echoRequest tcpip.MultiCounterStat
echoReply tcpip.MultiCounterStat
dstUnreachable tcpip.MultiCounterStat
srcQuench tcpip.MultiCounterStat
@@ -66,7 +66,7 @@ type multiCounterICMPv4PacketStats struct {
}
func (m *multiCounterICMPv4PacketStats) init(a, b *tcpip.ICMPv4PacketStats) {
- m.echo.Init(a.Echo, b.Echo)
+ m.echoRequest.Init(a.EchoRequest, b.EchoRequest)
m.echoReply.Init(a.EchoReply, b.EchoReply)
m.dstUnreachable.Init(a.DstUnreachable, b.DstUnreachable)
m.srcQuench.Init(a.SrcQuench, b.SrcQuench)
diff --git a/pkg/tcpip/network/ipv6/icmp.go b/pkg/tcpip/network/ipv6/icmp.go
index 5f44ab317..6344a3e09 100644
--- a/pkg/tcpip/network/ipv6/icmp.go
+++ b/pkg/tcpip/network/ipv6/icmp.go
@@ -18,7 +18,6 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
@@ -165,7 +164,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
// used to find out which transport endpoint must be notified about the ICMP
// packet.
func (e *endpoint) handleControl(transErr stack.TransportError, pkt *stack.PacketBuffer) {
- h, ok := pkt.Data.PullUp(header.IPv6MinimumSize)
+ h, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
return
}
@@ -184,10 +183,10 @@ func (e *endpoint) handleControl(transErr stack.TransportError, pkt *stack.Packe
// Skip the IP header, then handle the fragmentation header if there
// is one.
- pkt.Data.TrimFront(header.IPv6MinimumSize)
+ pkt.Data().TrimFront(header.IPv6MinimumSize)
p := hdr.TransportProtocol()
if p == header.IPv6FragmentHeader {
- f, ok := pkt.Data.PullUp(header.IPv6FragmentHeaderSize)
+ f, ok := pkt.Data().PullUp(header.IPv6FragmentHeaderSize)
if !ok {
return
}
@@ -200,7 +199,7 @@ func (e *endpoint) handleControl(transErr stack.TransportError, pkt *stack.Packe
// Skip fragmentation header and find out the actual protocol
// number.
- pkt.Data.TrimFront(header.IPv6FragmentHeaderSize)
+ pkt.Data().TrimFront(header.IPv6FragmentHeaderSize)
p = fragHdr.TransportProtocol()
}
@@ -268,7 +267,7 @@ func isMLDValid(pkt *stack.PacketBuffer, iph header.IPv6, routerAlert *header.IP
if routerAlert == nil || routerAlert.Value != header.IPv6RouterAlertMLD {
return false
}
- if pkt.Data.Size() < header.ICMPv6HeaderSize+header.MLDMinimumSize {
+ if pkt.Data().Size() < header.ICMPv6HeaderSize+header.MLDMinimumSize {
return false
}
if iph.HopLimit() != header.MLDHopLimit {
@@ -285,7 +284,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
received := e.stats.icmp.packetsReceived
// TODO(gvisor.dev/issue/170): ICMP packets don't have their TransportHeader
// fields set. See icmp/protocol.go:protocol.Parse for a full explanation.
- v, ok := pkt.Data.PullUp(header.ICMPv6HeaderSize)
+ v, ok := pkt.Data().PullUp(header.ICMPv6HeaderSize)
if !ok {
received.invalid.Increment()
return
@@ -296,11 +295,14 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
dstAddr := iph.DestinationAddress()
// Validate ICMPv6 checksum before processing the packet.
- //
- // This copy is used as extra payload during the checksum calculation.
- payload := pkt.Data.Clone(nil)
- payload.TrimFront(len(h))
- if got, want := h.Checksum(), header.ICMPv6Checksum(h, srcAddr, dstAddr, payload); got != want {
+ payload := pkt.Data().AsRange().SubRange(len(h))
+ if got, want := h.Checksum(), header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: h,
+ Src: srcAddr,
+ Dst: dstAddr,
+ PayloadCsum: payload.Checksum(),
+ PayloadLen: payload.Size(),
+ }); got != want {
received.invalid.Increment()
return
}
@@ -320,12 +322,12 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
switch icmpType := h.Type(); icmpType {
case header.ICMPv6PacketTooBig:
received.packetTooBig.Increment()
- hdr, ok := pkt.Data.PullUp(header.ICMPv6PacketTooBigMinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.ICMPv6PacketTooBigMinimumSize)
if !ok {
received.invalid.Increment()
return
}
- pkt.Data.TrimFront(header.ICMPv6PacketTooBigMinimumSize)
+ pkt.Data().TrimFront(header.ICMPv6PacketTooBigMinimumSize)
networkMTU, err := calculateNetworkMTU(header.ICMPv6(hdr).MTU(), header.IPv6MinimumSize)
if err != nil {
networkMTU = 0
@@ -334,12 +336,12 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
case header.ICMPv6DstUnreachable:
received.dstUnreachable.Increment()
- hdr, ok := pkt.Data.PullUp(header.ICMPv6DstUnreachableMinimumSize)
+ hdr, ok := pkt.Data().PullUp(header.ICMPv6DstUnreachableMinimumSize)
if !ok {
received.invalid.Increment()
return
}
- pkt.Data.TrimFront(header.ICMPv6DstUnreachableMinimumSize)
+ pkt.Data().TrimFront(header.ICMPv6DstUnreachableMinimumSize)
switch header.ICMPv6(hdr).Code() {
case header.ICMPv6NetworkUnreachable:
e.handleControl(&icmpv6DestinationNetworkUnreachableSockError{}, pkt)
@@ -348,16 +350,16 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
}
case header.ICMPv6NeighborSolicit:
received.neighborSolicit.Increment()
- if !isNDPValid() || pkt.Data.Size() < header.ICMPv6NeighborSolicitMinimumSize {
+ if !isNDPValid() || pkt.Data().Size() < header.ICMPv6NeighborSolicitMinimumSize {
received.invalid.Increment()
return
}
// The remainder of payload must be only the neighbor solicitation, so
- // payload.ToView() always returns the solicitation. Per RFC 6980 section 5,
+ // payload.AsView() always returns the solicitation. Per RFC 6980 section 5,
// NDP messages cannot be fragmented. Also note that in the common case NDP
- // datagrams are very small and ToView() will not incur allocations.
- ns := header.NDPNeighborSolicit(payload.ToView())
+ // datagrams are very small and AsView() will not incur allocations.
+ ns := header.NDPNeighborSolicit(payload.AsView())
targetAddr := ns.TargetAddress()
// As per RFC 4861 section 4.3, the Target Address MUST NOT be a multicast
@@ -380,6 +382,10 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
// stack know so it can handle such a scenario and do nothing further with
// the NS.
if srcAddr == header.IPv6Any {
+ // Since this is a DAD message, we know the sender does not actually hold
+ // the target address, so there is no "holder".
+ var holderLinkAddress tcpip.LinkAddress
+
// We would get an error if the address no longer exists or the address
// is no longer tentative (DAD resolved between the call to
// hasTentativeAddr and this point). Both of these are valid scenarios:
@@ -391,7 +397,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
//
// TODO(gvisor.dev/issue/4046): Handle the scenario when a duplicate
// address is detected for an assigned address.
- switch err := e.dupTentativeAddrDetected(targetAddr); err.(type) {
+ switch err := e.dupTentativeAddrDetected(targetAddr, holderLinkAddress); err.(type) {
case nil, *tcpip.ErrBadAddress, *tcpip.ErrInvalidEndpointState:
default:
panic(fmt.Sprintf("unexpected error handling duplicate tentative address: %s", err))
@@ -529,7 +535,11 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
na.SetOverrideFlag(true)
na.SetTargetAddress(targetAddr)
na.Options().Serialize(optsSerializer)
- packet.SetChecksum(header.ICMPv6Checksum(packet, r.LocalAddress, r.RemoteAddress, buffer.VectorisedView{}))
+ packet.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: packet,
+ Src: r.LocalAddress,
+ Dst: r.RemoteAddress,
+ }))
// RFC 4861 Neighbor Discovery for IP version 6 (IPv6)
//
@@ -545,20 +555,34 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
case header.ICMPv6NeighborAdvert:
received.neighborAdvert.Increment()
- if !isNDPValid() || pkt.Data.Size() < header.ICMPv6NeighborAdvertMinimumSize {
+ if !isNDPValid() || pkt.Data().Size() < header.ICMPv6NeighborAdvertMinimumSize {
received.invalid.Increment()
return
}
// The remainder of payload must be only the neighbor advertisement, so
- // payload.ToView() always returns the advertisement. Per RFC 6980 section
+ // payload.AsView() always returns the advertisement. Per RFC 6980 section
// 5, NDP messages cannot be fragmented. Also note that in the common case
- // NDP datagrams are very small and ToView() will not incur allocations.
- na := header.NDPNeighborAdvert(payload.ToView())
+ // NDP datagrams are very small and AsView() will not incur allocations.
+ na := header.NDPNeighborAdvert(payload.AsView())
+
+ it, err := na.Options().Iter(false /* check */)
+ if err != nil {
+ // If we have a malformed NDP NA option, drop the packet.
+ received.invalid.Increment()
+ return
+ }
+
+ targetLinkAddr, ok := getTargetLinkAddr(it)
+ if !ok {
+ received.invalid.Increment()
+ return
+ }
+
targetAddr := na.TargetAddress()
e.dad.mu.Lock()
- e.dad.mu.dad.StopLocked(targetAddr, false /* aborted */)
+ e.dad.mu.dad.StopLocked(targetAddr, &stack.DADDupAddrDetected{HolderLinkAddress: targetLinkAddr})
e.dad.mu.Unlock()
if e.hasTentativeAddr(targetAddr) {
@@ -578,7 +602,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
//
// TODO(gvisor.dev/issue/4046): Handle the scenario when a duplicate
// address is detected for an assigned address.
- switch err := e.dupTentativeAddrDetected(targetAddr); err.(type) {
+ switch err := e.dupTentativeAddrDetected(targetAddr, targetLinkAddr); err.(type) {
case nil, *tcpip.ErrBadAddress, *tcpip.ErrInvalidEndpointState:
return
default:
@@ -586,13 +610,6 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
}
}
- it, err := na.Options().Iter(false /* check */)
- if err != nil {
- // If we have a malformed NDP NA option, drop the packet.
- received.invalid.Increment()
- return
- }
-
// At this point we know that the target address is not tentative on the
// NIC. However, the target address may still be assigned to the NIC but not
// tentative (it could be permanent). Such a scenario is beyond the scope of
@@ -602,11 +619,6 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
// TODO(b/143147598): Handle the scenario described above. Also inform the
// netstack integration that a duplicate address was detected outside of
// DAD.
- targetLinkAddr, ok := getTargetLinkAddr(it)
- if !ok {
- received.invalid.Increment()
- return
- }
// As per RFC 4861 section 7.1.2:
// A node MUST silently discard any received Neighbor Advertisement
@@ -657,13 +669,20 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
replyPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: int(r.MaxHeaderLength()) + header.ICMPv6EchoMinimumSize,
- Data: pkt.Data,
+ Data: pkt.Data().ExtractVV(),
})
- packet := header.ICMPv6(replyPkt.TransportHeader().Push(header.ICMPv6EchoMinimumSize))
+ icmp := header.ICMPv6(replyPkt.TransportHeader().Push(header.ICMPv6EchoMinimumSize))
pkt.TransportProtocolNumber = header.ICMPv6ProtocolNumber
- copy(packet, icmpHdr)
- packet.SetType(header.ICMPv6EchoReply)
- packet.SetChecksum(header.ICMPv6Checksum(packet, r.LocalAddress, r.RemoteAddress, pkt.Data))
+ copy(icmp, icmpHdr)
+ icmp.SetType(header.ICMPv6EchoReply)
+ dataRange := replyPkt.Data().AsRange()
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: r.LocalAddress,
+ Dst: r.RemoteAddress,
+ PayloadCsum: dataRange.Checksum(),
+ PayloadLen: dataRange.Size(),
+ }))
if err := r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{
Protocol: header.ICMPv6ProtocolNumber,
TTL: r.DefaultTTL(),
@@ -676,7 +695,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
case header.ICMPv6EchoReply:
received.echoReply.Increment()
- if pkt.Data.Size() < header.ICMPv6EchoMinimumSize {
+ if pkt.Data().Size() < header.ICMPv6EchoMinimumSize {
received.invalid.Increment()
return
}
@@ -696,7 +715,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
//
// Is the NDP payload of sufficient size to hold a Router Solicitation?
- if !isNDPValid() || pkt.Data.Size()-header.ICMPv6HeaderSize < header.NDPRSMinimumSize {
+ if !isNDPValid() || pkt.Data().Size()-header.ICMPv6HeaderSize < header.NDPRSMinimumSize {
received.invalid.Increment()
return
}
@@ -710,9 +729,9 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
return
}
- // Note that in the common case NDP datagrams are very small and ToView()
+ // Note that in the common case NDP datagrams are very small and AsView()
// will not incur allocations.
- rs := header.NDPRouterSolicit(payload.ToView())
+ rs := header.NDPRouterSolicit(payload.AsView())
it, err := rs.Options().Iter(false /* check */)
if err != nil {
// Options are not valid as per the wire format, silently drop the packet.
@@ -756,7 +775,7 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
//
// Is the NDP payload of sufficient size to hold a Router Advertisement?
- if !isNDPValid() || pkt.Data.Size()-header.ICMPv6HeaderSize < header.NDPRAMinimumSize {
+ if !isNDPValid() || pkt.Data().Size()-header.ICMPv6HeaderSize < header.NDPRAMinimumSize {
received.invalid.Increment()
return
}
@@ -770,9 +789,9 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
return
}
- // Note that in the common case NDP datagrams are very small and ToView()
+ // Note that in the common case NDP datagrams are very small and AsView()
// will not incur allocations.
- ra := header.NDPRouterAdvert(payload.ToView())
+ ra := header.NDPRouterAdvert(payload.AsView())
it, err := ra.Options().Iter(false /* check */)
if err != nil {
// Options are not valid as per the wire format, silently drop the packet.
@@ -850,11 +869,11 @@ func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, r
switch icmpType {
case header.ICMPv6MulticastListenerQuery:
e.mu.Lock()
- e.mu.mld.handleMulticastListenerQuery(header.MLD(payload.ToView()))
+ e.mu.mld.handleMulticastListenerQuery(header.MLD(payload.AsView()))
e.mu.Unlock()
case header.ICMPv6MulticastListenerReport:
e.mu.Lock()
- e.mu.mld.handleMulticastListenerReport(header.MLD(payload.ToView()))
+ e.mu.mld.handleMulticastListenerReport(header.MLD(payload.AsView()))
e.mu.Unlock()
case header.ICMPv6MulticastListenerDone:
default:
@@ -1077,13 +1096,13 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip
if available < header.IPv6MinimumSize {
return nil
}
- payloadLen := network.Size() + transport.Size() + pkt.Data.Size()
+ payloadLen := network.Size() + transport.Size() + pkt.Data().Size()
if payloadLen > available {
payloadLen = available
}
payload := network.ToVectorisedView()
payload.AppendView(transport)
- payload.Append(pkt.Data)
+ payload.Append(pkt.Data().ExtractVV())
payload.CapLength(payloadLen)
newPkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
@@ -1115,7 +1134,14 @@ func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer) tcpip
default:
panic(fmt.Sprintf("unsupported ICMP type %T", reason))
}
- icmpHdr.SetChecksum(header.ICMPv6Checksum(icmpHdr, route.LocalAddress, route.RemoteAddress, newPkt.Data))
+ dataRange := newPkt.Data().AsRange()
+ icmpHdr.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpHdr,
+ Src: route.LocalAddress,
+ Dst: route.RemoteAddress,
+ PayloadCsum: dataRange.Checksum(),
+ PayloadLen: dataRange.Size(),
+ }))
if err := route.WritePacket(
nil, /* gso */
stack.NetworkHeaderParams{
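For reference, a minimal sketch of the parameter-struct form of the checksum call used throughout this change. The helper and its arguments are hypothetical; the field set (Header, Src, Dst, PayloadCsum, PayloadLen) is taken from the calls above, and the payload is folded with header.Checksum as the tests do.

package example

import (
    "gvisor.dev/gvisor/pkg/tcpip"
    "gvisor.dev/gvisor/pkg/tcpip/header"
)

// checksumICMPv6 fills in the checksum of an ICMPv6 message given its header
// view, the pseudo-header addresses, and the message body. PayloadCsum and
// PayloadLen replace the VectorisedView argument of the old signature, so a
// caller can compute the payload checksum however is convenient.
func checksumICMPv6(icmp header.ICMPv6, src, dst tcpip.Address, payload []byte) {
    icmp.SetChecksum(0)
    icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
        Header:      icmp,
        Src:         src,
        Dst:         dst,
        PayloadCsum: header.Checksum(payload, 0 /* initial */),
        PayloadLen:  len(payload),
    }))
}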
diff --git a/pkg/tcpip/network/ipv6/icmp_test.go b/pkg/tcpip/network/ipv6/icmp_test.go
index c27164344..d4e63710c 100644
--- a/pkg/tcpip/network/ipv6/icmp_test.go
+++ b/pkg/tcpip/network/ipv6/icmp_test.go
@@ -324,7 +324,13 @@ func TestICMPCounts(t *testing.T) {
icmp := header.ICMPv6(buffer.NewView(typ.size + len(typ.extraData)))
copy(icmp[typ.size:], typ.extraData)
icmp.SetType(typ.typ)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp[:typ.size], lladdr0, lladdr1, buffer.View(typ.extraData).ToVectorisedView()))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp[:typ.size],
+ Src: lladdr0,
+ Dst: lladdr1,
+ PayloadCsum: header.Checksum(typ.extraData, 0 /* initial */),
+ PayloadLen: len(typ.extraData),
+ }))
handleICMPInIPv6(ep, lladdr1, lladdr0, icmp, typ.hopLimit, typ.includeRouterAlert)
}
@@ -498,7 +504,11 @@ func TestLinkResolution(t *testing.T) {
hdr := buffer.NewPrependable(int(r.MaxHeaderLength()) + header.IPv6MinimumSize + header.ICMPv6EchoMinimumSize)
pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6EchoMinimumSize))
pkt.SetType(header.ICMPv6EchoRequest)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, r.LocalAddress, r.RemoteAddress, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: r.LocalAddress,
+ Dst: r.RemoteAddress,
+ }))
// We can't send our payload directly over the route because that
// doesn't provoke NDP discovery.
@@ -687,7 +697,11 @@ func TestICMPChecksumValidationSimple(t *testing.T) {
copy(icmp[typ.size:], typ.extraData)
icmp.SetType(typ.typ)
if checksum {
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, lladdr1, lladdr0, buffer.View{}.ToVectorisedView()))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: lladdr1,
+ Dst: lladdr0,
+ }))
}
ip := header.IPv6(buffer.NewView(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -879,7 +893,11 @@ func TestICMPChecksumValidationWithPayload(t *testing.T) {
payloadFn(icmpHdr.Payload())
if checksum {
- icmpHdr.SetChecksum(header.ICMPv6Checksum(icmpHdr, lladdr1, lladdr0, buffer.VectorisedView{}))
+ icmpHdr.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpHdr,
+ Src: lladdr1,
+ Dst: lladdr0,
+ }))
}
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
@@ -1058,7 +1076,13 @@ func TestICMPChecksumValidationWithPayloadMultipleViews(t *testing.T) {
payloadFn(payload)
if checksum {
- icmpHdr.SetChecksum(header.ICMPv6Checksum(icmpHdr, lladdr1, lladdr0, payload.ToVectorisedView()))
+ icmpHdr.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpHdr,
+ Src: lladdr1,
+ Dst: lladdr0,
+ PayloadCsum: header.Checksum(payload, 0 /* initial */),
+ PayloadLen: len(payload),
+ }))
}
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
@@ -1324,7 +1348,11 @@ func TestPacketQueing(t *testing.T) {
pkt.SetType(header.ICMPv6EchoRequest)
pkt.SetCode(0)
pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, host2IPv6Addr.AddressWithPrefix.Address, host1IPv6Addr.AddressWithPrefix.Address, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: host2IPv6Addr.AddressWithPrefix.Address,
+ Dst: host1IPv6Addr.AddressWithPrefix.Address,
+ }))
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
PayloadLength: header.ICMPv6MinimumSize,
@@ -1422,7 +1450,11 @@ func TestPacketQueing(t *testing.T) {
na.Options().Serialize(header.NDPOptionsSerializer{
header.NDPTargetLinkLayerAddressOption(host2NICLinkAddr),
})
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, host2IPv6Addr.AddressWithPrefix.Address, host1IPv6Addr.AddressWithPrefix.Address, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: host2IPv6Addr.AddressWithPrefix.Address,
+ Dst: host1IPv6Addr.AddressWithPrefix.Address,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -1661,7 +1693,11 @@ func TestCallsToNeighborCache(t *testing.T) {
}
icmp := test.createPacket()
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, test.source, test.destination, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: test.source,
+ Dst: test.destination,
+ }))
handleICMPInIPv6(ep, test.source, test.destination, icmp, header.NDPHopLimit, false)
// Confirm the endpoint calls the correct NUDHandler method.
diff --git a/pkg/tcpip/network/ipv6/ipv6.go b/pkg/tcpip/network/ipv6/ipv6.go
index 7638ade35..46b6cc41a 100644
--- a/pkg/tcpip/network/ipv6/ipv6.go
+++ b/pkg/tcpip/network/ipv6/ipv6.go
@@ -348,7 +348,7 @@ func (e *endpoint) hasTentativeAddr(addr tcpip.Address) bool {
// dupTentativeAddrDetected removes the tentative address if it exists. If the
// address was generated via SLAAC, an attempt is made to generate a new
// address.
-func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address) tcpip.Error {
+func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address, holderLinkAddr tcpip.LinkAddress) tcpip.Error {
e.mu.Lock()
defer e.mu.Unlock()
@@ -363,7 +363,7 @@ func (e *endpoint) dupTentativeAddrDetected(addr tcpip.Address) tcpip.Error {
// If the address is a SLAAC address, do not invalidate its SLAAC prefix as an
// attempt will be made to generate a new address for it.
- if err := e.removePermanentEndpointLocked(addressEndpoint, false /* allowSLAACInvalidation */, true /* dadFailure */); err != nil {
+ if err := e.removePermanentEndpointLocked(addressEndpoint, false /* allowSLAACInvalidation */, &stack.DADDupAddrDetected{HolderLinkAddress: holderLinkAddr}); err != nil {
return err
}
@@ -536,8 +536,20 @@ func (e *endpoint) disableLocked() {
}
e.mu.ndp.stopSolicitingRouters()
+ // Stop DAD for all the tentative unicast addresses.
+ e.mu.addressableEndpointState.ForEachEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {
+ if addressEndpoint.GetKind() != stack.PermanentTentative {
+ return true
+ }
+
+ addr := addressEndpoint.AddressWithPrefix().Address
+ if header.IsV6UnicastAddress(addr) {
+ e.mu.ndp.stopDuplicateAddressDetection(addr, &stack.DADAborted{})
+ }
+
+ return true
+ })
e.mu.ndp.cleanupState(false /* hostOnly */)
- e.stopDADForPermanentAddressesLocked()
// The endpoint may have already left the multicast group.
switch err := e.leaveGroupLocked(header.IPv6AllNodesMulticastAddress); err.(type) {
@@ -555,25 +567,6 @@ func (e *endpoint) disableLocked() {
}
}
-// stopDADForPermanentAddressesLocked stops DAD for all permaneent addresses.
-//
-// Precondition: e.mu must be write locked.
-func (e *endpoint) stopDADForPermanentAddressesLocked() {
- // Stop DAD for all the tentative unicast addresses.
- e.mu.addressableEndpointState.ForEachEndpoint(func(addressEndpoint stack.AddressEndpoint) bool {
- if addressEndpoint.GetKind() != stack.PermanentTentative {
- return true
- }
-
- addr := addressEndpoint.AddressWithPrefix().Address
- if header.IsV6UnicastAddress(addr) {
- e.mu.ndp.stopDuplicateAddressDetection(addr, false /* failed */)
- }
-
- return true
- })
-}
-
// DefaultTTL is the default hop limit for this endpoint.
func (e *endpoint) DefaultTTL() uint8 {
return e.protocol.DefaultTTL()
@@ -619,7 +612,7 @@ func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params
}
func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32, gso *stack.GSO) bool {
- payload := pkt.TransportHeader().View().Size() + pkt.Data.Size()
+ payload := pkt.TransportHeader().View().Size() + pkt.Data().Size()
return (gso == nil || gso.Type == stack.GSONone) && uint32(payload) > networkMTU
}
@@ -819,14 +812,14 @@ func (e *endpoint) WritePackets(r *stack.Route, gso *stack.GSO, pkts stack.Packe
// WriteHeaderIncludedPacket implements stack.NetworkEndpoint.
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
// The packet already has an IP header, but there are a few required checks.
- h, ok := pkt.Data.PullUp(header.IPv6MinimumSize)
+ h, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
return &tcpip.ErrMalformedHeader{}
}
ip := header.IPv6(h)
// Always set the payload length.
- pktSize := pkt.Data.Size()
+ pktSize := pkt.Data().Size()
ip.SetPayloadLength(uint16(pktSize - header.IPv6MinimumSize))
// Set the source address when zero.
@@ -964,7 +957,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
stats := e.stats.ip
h := header.IPv6(pkt.NetworkHeader().View())
- if !h.IsValid(pkt.Data.Size() + pkt.NetworkHeader().View().Size() + pkt.TransportHeader().View().Size()) {
+ if !h.IsValid(pkt.Data().Size() + pkt.NetworkHeader().View().Size() + pkt.TransportHeader().View().Size()) {
stats.MalformedPacketsReceived.Increment()
return
}
@@ -993,13 +986,14 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
return
}
+ // Create a VV to parse the packet. We don't plan to modify anything here.
// vv consists of:
// - Any IPv6 header bytes after the first 40 (i.e. extensions).
// - The transport header, if present.
// - Any other payload data.
vv := pkt.NetworkHeader().View()[header.IPv6MinimumSize:].ToVectorisedView()
vv.AppendView(pkt.TransportHeader().View())
- vv.Append(pkt.Data)
+ vv.AppendViews(pkt.Data().Views())
it := header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(h.NextHeader()), vv)
// iptables filtering. All packets that reach here are intended for
@@ -1257,7 +1251,9 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// have more extension headers in the reassembled payload, as per RFC
// 8200 section 4.5. We also use the NextHeader value from the first
// fragment.
- it = header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(proto), pkt.Data)
+ data := pkt.Data()
+ dataVV := buffer.NewVectorisedView(data.Size(), data.Views())
+ it = header.MakeIPv6PayloadIterator(header.IPv6ExtensionHeaderIdentifier(proto), dataVV)
}
case header.IPv6DestinationOptionsExtHdr:
@@ -1314,7 +1310,7 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
// For reassembled fragments, pkt.TransportHeader is unset, so this is a
// no-op and pkt.Data begins with the transport header.
extHdr.Buf.TrimFront(pkt.TransportHeader().View().Size())
- pkt.Data = extHdr.Buf
+ pkt.Data().Replace(extHdr.Buf)
stats.PacketsDelivered.Increment()
if p := tcpip.TransportProtocolNumber(extHdr.Identifier); p == header.ICMPv6ProtocolNumber {
@@ -1381,8 +1377,6 @@ func (e *endpoint) handlePacket(pkt *stack.PacketBuffer) {
func (e *endpoint) Close() {
e.mu.Lock()
e.disableLocked()
- e.mu.ndp.removeSLAACAddresses(false /* keepLinkLocal */)
- e.stopDADForPermanentAddressesLocked()
e.mu.addressableEndpointState.Cleanup()
e.mu.Unlock()
@@ -1448,14 +1442,14 @@ func (e *endpoint) RemovePermanentAddress(addr tcpip.Address) tcpip.Error {
return &tcpip.ErrBadLocalAddress{}
}
- return e.removePermanentEndpointLocked(addressEndpoint, true /* allowSLAACInvalidation */, false /* dadFailure */)
+ return e.removePermanentEndpointLocked(addressEndpoint, true /* allowSLAACInvalidation */, &stack.DADAborted{})
}
// removePermanentEndpointLocked is like removePermanentAddressLocked except
// it works with a stack.AddressEndpoint.
//
// Precondition: e.mu must be write locked.
-func (e *endpoint) removePermanentEndpointLocked(addressEndpoint stack.AddressEndpoint, allowSLAACInvalidation, dadFailure bool) tcpip.Error {
+func (e *endpoint) removePermanentEndpointLocked(addressEndpoint stack.AddressEndpoint, allowSLAACInvalidation bool, dadResult stack.DADResult) tcpip.Error {
addr := addressEndpoint.AddressWithPrefix()
// If we are removing an address generated via SLAAC, cleanup
// its SLAAC resources and notify the integrator.
@@ -1466,16 +1460,16 @@ func (e *endpoint) removePermanentEndpointLocked(addressEndpoint stack.AddressEn
e.mu.ndp.cleanupTempSLAACAddrResourcesAndNotify(addr)
}
- return e.removePermanentEndpointInnerLocked(addressEndpoint, dadFailure)
+ return e.removePermanentEndpointInnerLocked(addressEndpoint, dadResult)
}
// removePermanentEndpointInnerLocked is like removePermanentEndpointLocked
// except it does not cleanup SLAAC address state.
//
// Precondition: e.mu must be write locked.
-func (e *endpoint) removePermanentEndpointInnerLocked(addressEndpoint stack.AddressEndpoint, dadFailure bool) tcpip.Error {
+func (e *endpoint) removePermanentEndpointInnerLocked(addressEndpoint stack.AddressEndpoint, dadResult stack.DADResult) tcpip.Error {
addr := addressEndpoint.AddressWithPrefix()
- e.mu.ndp.stopDuplicateAddressDetection(addr.Address, dadFailure)
+ e.mu.ndp.stopDuplicateAddressDetection(addr.Address, dadResult)
if err := e.mu.addressableEndpointState.RemovePermanentEndpoint(addressEndpoint); err != nil {
return err
diff --git a/pkg/tcpip/network/ipv6/ipv6_test.go b/pkg/tcpip/network/ipv6/ipv6_test.go
index 05c9f4dbf..266a53e3b 100644
--- a/pkg/tcpip/network/ipv6/ipv6_test.go
+++ b/pkg/tcpip/network/ipv6/ipv6_test.go
@@ -68,7 +68,11 @@ func testReceiveICMP(t *testing.T, s *stack.Stack, e *channel.Endpoint, src, dst
hdr := buffer.NewPrependable(header.IPv6MinimumSize + header.ICMPv6NeighborAdvertMinimumSize)
pkt := header.ICMPv6(hdr.Prepend(header.ICMPv6NeighborAdvertMinimumSize))
pkt.SetType(header.ICMPv6NeighborAdvert)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, src, dst, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: src,
+ Dst: dst,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -216,7 +220,7 @@ func compareFragments(packets []*stack.PacketBuffer, sourcePacket *stack.PacketB
// Store the reassembled payload as we parse each fragment. The payload
// includes the Transport header and everything after.
reassembledPayload.AppendView(fragment.TransportHeader().View())
- reassembledPayload.Append(fragment.Data)
+ reassembledPayload.AppendView(fragment.Data().AsRange().ToOwnedView())
}
if diff := cmp.Diff(buffer.View(source[sourceIPHeadersLen:]), reassembledPayload.ToView()); diff != "" {
@@ -3065,7 +3069,11 @@ func TestForwarding(t *testing.T) {
icmp.SetType(header.ICMPv6EchoRequest)
icmp.SetCode(header.ICMPv6UnusedCode)
icmp.SetChecksum(0)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, remoteIPv6Addr1, remoteIPv6Addr2, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: remoteIPv6Addr1,
+ Dst: remoteIPv6Addr2,
+ }))
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
PayloadLength: header.ICMPv6MinimumSize,
diff --git a/pkg/tcpip/network/ipv6/mld.go b/pkg/tcpip/network/ipv6/mld.go
index 205e36cdd..dd153466d 100644
--- a/pkg/tcpip/network/ipv6/mld.go
+++ b/pkg/tcpip/network/ipv6/mld.go
@@ -236,7 +236,11 @@ func (mld *mldState) writePacket(destAddress, groupAddress tcpip.Address, mldTyp
localAddress = header.IPv6Any
}
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, localAddress, destAddress, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: localAddress,
+ Dst: destAddress,
+ }))
extensionHeaders := header.IPv6ExtHdrSerializer{
header.IPv6SerializableHopByHopExtHdr{
diff --git a/pkg/tcpip/network/ipv6/mld_test.go b/pkg/tcpip/network/ipv6/mld_test.go
index f1b8d58f2..9a425e50a 100644
--- a/pkg/tcpip/network/ipv6/mld_test.go
+++ b/pkg/tcpip/network/ipv6/mld_test.go
@@ -326,11 +326,15 @@ func createAndInjectMLDPacket(e *channel.Endpoint, mldType header.ICMPv6Type, ho
mld := header.MLD(icmp.MessageBody())
mld.SetMaximumResponseDelay(0)
mld.SetMulticastAddress(header.IPv6Any)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, srcAddress, header.IPv6AllNodesMulticastAddress, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: srcAddress,
+ Dst: header.IPv6AllNodesMulticastAddress,
+ }))
- e.InjectInbound(ipv6.ProtocolNumber, &stack.PacketBuffer{
+ e.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
Data: buf.ToVectorisedView(),
- })
+ }))
}
func TestMLDPacketValidation(t *testing.T) {
diff --git a/pkg/tcpip/network/ipv6/ndp.go b/pkg/tcpip/network/ipv6/ndp.go
index 721269c58..d9b728878 100644
--- a/pkg/tcpip/network/ipv6/ndp.go
+++ b/pkg/tcpip/network/ipv6/ndp.go
@@ -208,16 +208,12 @@ const (
// NDPDispatcher is the interface integrators of netstack must implement to
// receive and handle NDP related events.
type NDPDispatcher interface {
- // OnDuplicateAddressDetectionStatus is called when the DAD process for an
- // address (addr) on a NIC (with ID nicID) completes. resolved is set to true
- // if DAD completed successfully (no duplicate addr detected); false otherwise
- // (addr was detected to be a duplicate on the link the NIC is a part of, or
- // it was stopped for some other reason, such as the address being removed).
- // If an error occured during DAD, err is set and resolved must be ignored.
+ // OnDuplicateAddressDetectionResult is called when the DAD process for an
+ // address on a NIC completes.
//
// This function is not permitted to block indefinitely. This function
// is also not permitted to call into the stack.
- OnDuplicateAddressDetectionStatus(nicID tcpip.NICID, addr tcpip.Address, resolved bool, err tcpip.Error)
+ OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult)
// OnDefaultRouterDiscovered is called when a new default router is
// discovered. Implementations must return true if the newly discovered
@@ -225,14 +221,14 @@ type NDPDispatcher interface {
//
// This function is not permitted to block indefinitely. This function
// is also not permitted to call into the stack.
- OnDefaultRouterDiscovered(nicID tcpip.NICID, addr tcpip.Address) bool
+ OnDefaultRouterDiscovered(tcpip.NICID, tcpip.Address) bool
// OnDefaultRouterInvalidated is called when a discovered default router that
// was remembered is invalidated.
//
// This function is not permitted to block indefinitely. This function
// is also not permitted to call into the stack.
- OnDefaultRouterInvalidated(nicID tcpip.NICID, addr tcpip.Address)
+ OnDefaultRouterInvalidated(tcpip.NICID, tcpip.Address)
// OnOnLinkPrefixDiscovered is called when a new on-link prefix is discovered.
// Implementations must return true if the newly discovered on-link prefix
@@ -240,14 +236,14 @@ type NDPDispatcher interface {
//
// This function is not permitted to block indefinitely. This function
// is also not permitted to call into the stack.
- OnOnLinkPrefixDiscovered(nicID tcpip.NICID, prefix tcpip.Subnet) bool
+ OnOnLinkPrefixDiscovered(tcpip.NICID, tcpip.Subnet) bool
// OnOnLinkPrefixInvalidated is called when a discovered on-link prefix that
// was remembered is invalidated.
//
// This function is not permitted to block indefinitely. This function
// is also not permitted to call into the stack.
- OnOnLinkPrefixInvalidated(nicID tcpip.NICID, prefix tcpip.Subnet)
+ OnOnLinkPrefixInvalidated(tcpip.NICID, tcpip.Subnet)
// OnAutoGenAddress is called when a new prefix with its autonomous address-
// configuration flag set is received and SLAAC was performed. Implementations
@@ -280,12 +276,12 @@ type NDPDispatcher interface {
// It is up to the caller to use the DNS Servers only for their valid
// lifetime. OnRecursiveDNSServerOption may be called for new or
// already known DNS servers. If called with known DNS servers, their
- // valid lifetimes must be refreshed to lifetime (it may be increased,
- // decreased, or completely invalidated when lifetime = 0).
+ // valid lifetimes must be refreshed to the lifetime (it may be increased,
+ // decreased, or completely invalidated when the lifetime = 0).
//
// This function is not permitted to block indefinitely. It must not
// call functions on the stack itself.
- OnRecursiveDNSServerOption(nicID tcpip.NICID, addrs []tcpip.Address, lifetime time.Duration)
+ OnRecursiveDNSServerOption(tcpip.NICID, []tcpip.Address, time.Duration)
// OnDNSSearchListOption is called when the stack learns of DNS search lists
// through NDP.
@@ -293,9 +289,9 @@ type NDPDispatcher interface {
// It is up to the caller to use the domain names in the search list
// for only their valid lifetime. OnDNSSearchListOption may be called
// with new or already known domain names. If called with known domain
- // names, their valid lifetimes must be refreshed to lifetime (it may
- // be increased, decreased or completely invalidated when lifetime = 0.
- OnDNSSearchListOption(nicID tcpip.NICID, domainNames []string, lifetime time.Duration)
+ // names, their valid lifetimes must be refreshed to the lifetime (it may
+ // be increased, decreased, or completely invalidated when the lifetime = 0).
+ OnDNSSearchListOption(tcpip.NICID, []string, time.Duration)
// OnDHCPv6Configuration is called with an updated configuration that is
// available via DHCPv6 for the passed NIC.
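A minimal sketch of how an integrator's OnDuplicateAddressDetectionResult might inspect the new result type. Only the DAD callback is shown (the other NDPDispatcher methods are elided), and only the HolderLinkAddress field of DADDupAddrDetected is assumed from this change.

package example

import (
    "log"

    "gvisor.dev/gvisor/pkg/tcpip"
    "gvisor.dev/gvisor/pkg/tcpip/stack"
)

type dadLogger struct{}

// OnDuplicateAddressDetectionResult replaces OnDuplicateAddressDetectionStatus:
// the old (resolved, err) pair becomes a single DADResult whose concrete type
// carries the outcome.
func (dadLogger) OnDuplicateAddressDetectionResult(nicID tcpip.NICID, addr tcpip.Address, res stack.DADResult) {
    switch res := res.(type) {
    case *stack.DADSucceeded:
        log.Printf("NIC %d: %s is not a duplicate", nicID, addr)
    case *stack.DADDupAddrDetected:
        // HolderLinkAddress may be empty, e.g. when the conflicting message was
        // another node's DAD probe rather than a neighbor advertisement.
        log.Printf("NIC %d: %s is already held by %s", nicID, addr, res.HolderLinkAddress)
    case *stack.DADAborted:
        log.Printf("NIC %d: DAD for %s was stopped", nicID, addr)
    case *stack.DADError:
        log.Printf("NIC %d: DAD for %s failed", nicID, addr)
    }
}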
@@ -587,15 +583,25 @@ func (ndp *ndpState) startDuplicateAddressDetection(addr tcpip.Address, addressE
panic(fmt.Sprintf("ndpdad: addr %s is no longer tentative on NIC(%d)", addr, ndp.ep.nic.ID()))
}
- if r.Resolved {
+ var dadSucceeded bool
+ switch r.(type) {
+ case *stack.DADAborted, *stack.DADError, *stack.DADDupAddrDetected:
+ dadSucceeded = false
+ case *stack.DADSucceeded:
+ dadSucceeded = true
+ default:
+ panic(fmt.Sprintf("unrecognized DAD result = %T", r))
+ }
+
+ if dadSucceeded {
addressEndpoint.SetKind(stack.Permanent)
}
if ndpDisp := ndp.ep.protocol.options.NDPDisp; ndpDisp != nil {
- ndpDisp.OnDuplicateAddressDetectionStatus(ndp.ep.nic.ID(), addr, r.Resolved, r.Err)
+ ndpDisp.OnDuplicateAddressDetectionResult(ndp.ep.nic.ID(), addr, r)
}
- if r.Resolved {
+ if dadSucceeded {
if addressEndpoint.ConfigType() == stack.AddressConfigSlaac {
// Reset the generation attempts counter as we are starting the
// generation of a new address for the SLAAC prefix.
@@ -616,7 +622,7 @@ func (ndp *ndpState) startDuplicateAddressDetection(addr tcpip.Address, addressE
// Consider DAD to have resolved even if no DAD messages were actually
// transmitted.
if ndpDisp := ndp.ep.protocol.options.NDPDisp; ndpDisp != nil {
- ndpDisp.OnDuplicateAddressDetectionStatus(ndp.ep.nic.ID(), addr, true, nil)
+ ndpDisp.OnDuplicateAddressDetectionResult(ndp.ep.nic.ID(), addr, &stack.DADSucceeded{})
}
ndp.ep.onAddressAssignedLocked(addr)
@@ -633,8 +639,8 @@ func (ndp *ndpState) startDuplicateAddressDetection(addr tcpip.Address, addressE
// of this function to handle such a scenario.
//
// The IPv6 endpoint that ndp belongs to MUST be locked.
-func (ndp *ndpState) stopDuplicateAddressDetection(addr tcpip.Address, failed bool) {
- ndp.dad.StopLocked(addr, !failed)
+func (ndp *ndpState) stopDuplicateAddressDetection(addr tcpip.Address, reason stack.DADResult) {
+ ndp.dad.StopLocked(addr, reason)
}
// handleRA handles a Router Advertisement message that arrived on the NIC
@@ -1501,7 +1507,7 @@ func (ndp *ndpState) invalidateSLAACPrefix(prefix tcpip.Subnet, state slaacPrefi
ndpDisp.OnAutoGenAddressInvalidated(ndp.ep.nic.ID(), addressEndpoint.AddressWithPrefix())
}
- if err := ndp.ep.removePermanentEndpointInnerLocked(addressEndpoint, false /* dadFailure */); err != nil {
+ if err := ndp.ep.removePermanentEndpointInnerLocked(addressEndpoint, &stack.DADAborted{}); err != nil {
panic(fmt.Sprintf("ndp: error removing stable SLAAC address %s: %s", addressEndpoint.AddressWithPrefix(), err))
}
}
@@ -1560,7 +1566,7 @@ func (ndp *ndpState) cleanupSLAACPrefixResources(prefix tcpip.Subnet, state slaa
func (ndp *ndpState) invalidateTempSLAACAddr(tempAddrs map[tcpip.Address]tempSLAACAddrState, tempAddr tcpip.Address, tempAddrState tempSLAACAddrState) {
ndp.cleanupTempSLAACAddrResourcesAndNotifyInner(tempAddrs, tempAddr, tempAddrState)
- if err := ndp.ep.removePermanentEndpointInnerLocked(tempAddrState.addressEndpoint, false /* dadFailure */); err != nil {
+ if err := ndp.ep.removePermanentEndpointInnerLocked(tempAddrState.addressEndpoint, &stack.DADAborted{}); err != nil {
panic(fmt.Sprintf("error removing temporary SLAAC address %s: %s", tempAddrState.addressEndpoint.AddressWithPrefix(), err))
}
}
@@ -1721,7 +1727,11 @@ func (ndp *ndpState) startSolicitingRouters() {
icmpData.SetType(header.ICMPv6RouterSolicit)
rs := header.NDPRouterSolicit(icmpData.MessageBody())
rs.Options().Serialize(optsSerializer)
- icmpData.SetChecksum(header.ICMPv6Checksum(icmpData, localAddr, header.IPv6AllRoutersMulticastAddress, buffer.VectorisedView{}))
+ icmpData.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpData,
+ Src: localAddr,
+ Dst: header.IPv6AllRoutersMulticastAddress,
+ }))
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: int(ndp.ep.MaxHeaderLength()),
@@ -1812,7 +1822,11 @@ func (e *endpoint) sendNDPNS(srcAddr, dstAddr, targetAddr tcpip.Address, remoteL
ns := header.NDPNeighborSolicit(icmp.MessageBody())
ns.SetTargetAddress(targetAddr)
ns.Options().Serialize(opts)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, srcAddr, dstAddr, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: srcAddr,
+ Dst: dstAddr,
+ }))
pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
ReserveHeaderBytes: int(e.MaxHeaderLength()),
diff --git a/pkg/tcpip/network/ipv6/ndp_test.go b/pkg/tcpip/network/ipv6/ndp_test.go
index ce20af0e3..6e850fd46 100644
--- a/pkg/tcpip/network/ipv6/ndp_test.go
+++ b/pkg/tcpip/network/ipv6/ndp_test.go
@@ -90,7 +90,7 @@ type testNDPDispatcher struct {
addr tcpip.Address
}
-func (*testNDPDispatcher) OnDuplicateAddressDetectionStatus(tcpip.NICID, tcpip.Address, bool, tcpip.Error) {
+func (*testNDPDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {
}
func (t *testNDPDispatcher) OnDefaultRouterDiscovered(_ tcpip.NICID, addr tcpip.Address) bool {
@@ -215,7 +215,11 @@ func TestNeighborSolicitationWithSourceLinkLayerOption(t *testing.T) {
ns.SetTargetAddress(lladdr0)
opts := ns.Options()
copy(opts, test.optsBuf)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, lladdr0, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: lladdr1,
+ Dst: lladdr0,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -478,7 +482,11 @@ func TestNeighborSolicitationResponse(t *testing.T) {
ns.SetTargetAddress(nicAddr)
opts := ns.Options()
opts.Serialize(test.nsOpts)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, test.nsSrc, test.nsDst, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: test.nsSrc,
+ Dst: test.nsDst,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -554,7 +562,11 @@ func TestNeighborSolicitationResponse(t *testing.T) {
na.SetOverrideFlag(true)
na.SetTargetAddress(test.nsSrc)
na.Options().Serialize(ser)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, test.nsSrc, nicAddr, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: test.nsSrc,
+ Dst: nicAddr,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -657,7 +669,11 @@ func TestNeighborAdvertisementWithTargetLinkLayerOption(t *testing.T) {
ns.SetTargetAddress(lladdr1)
opts := ns.Options()
copy(opts, test.optsBuf)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, lladdr0, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: lladdr1,
+ Dst: lladdr0,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -874,7 +890,13 @@ func TestNDPValidation(t *testing.T) {
copy(icmp[typ.size:], typ.extraData)
icmp.SetType(typ.typ)
icmp.SetCode(test.code)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp[:typ.size], lladdr0, lladdr1, buffer.View(typ.extraData).ToVectorisedView()))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp[:typ.size],
+ Src: lladdr0,
+ Dst: lladdr1,
+ PayloadCsum: header.Checksum(typ.extraData, 0 /* initial */),
+ PayloadLen: len(typ.extraData),
+ }))
// Rx count of the NDP message should initially be 0.
if got := typStat.Value(); got != 0 {
@@ -987,7 +1009,11 @@ func TestNeighborAdvertisementValidation(t *testing.T) {
na := header.NDPNeighborAdvert(pkt.MessageBody())
na.SetTargetAddress(lladdr1)
na.SetSolicitedFlag(test.solicitedFlag)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, lladdr1, test.ipDstAddr, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: lladdr1,
+ Dst: test.ipDstAddr,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -1182,7 +1208,11 @@ func TestRouterAdvertValidation(t *testing.T) {
pkt.SetCode(test.code)
copy(pkt.MessageBody(), test.ndpPayload)
payloadLength := hdr.UsedLength()
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, test.src, header.IPv6AllNodesMulticastAddress, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: test.src,
+ Dst: header.IPv6AllNodesMulticastAddress,
+ }))
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
PayloadLength: uint16(payloadLength),
@@ -1284,10 +1314,10 @@ func TestCheckDuplicateAddress(t *testing.T) {
t.Fatalf("got s.CheckDuplicateAddress(%d, %d, %s, _) = %d, want = %d", nicID, ProtocolNumber, lladdr0, res, stack.DADAlreadyRunning)
}
- // Wait for DAD to resolve.
+ // Wait for DAD to complete.
clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits) * dadConfigs.RetransmitTimer)
for i := 0; i < dadRequestsMade; i++ {
- if diff := cmp.Diff(stack.DADResult{Resolved: true}, <-ch); diff != "" {
+ if diff := cmp.Diff(&stack.DADSucceeded{}, <-ch); diff != "" {
t.Errorf("(i=%d) DAD result mismatch (-want +got):\n%s", i, diff)
}
}
diff --git a/pkg/tcpip/network/multicast_group_test.go b/pkg/tcpip/network/multicast_group_test.go
index 73913aef8..ecd5003a7 100644
--- a/pkg/tcpip/network/multicast_group_test.go
+++ b/pkg/tcpip/network/multicast_group_test.go
@@ -230,9 +230,9 @@ func createAndInjectIGMPPacket(e *channel.Endpoint, igmpType byte, maxRespTime b
igmp.SetGroupAddress(groupAddress)
igmp.SetChecksum(header.IGMPCalculateChecksum(igmp))
- e.InjectInbound(ipv4.ProtocolNumber, &stack.PacketBuffer{
+ e.InjectInbound(ipv4.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
Data: buf.ToVectorisedView(),
- })
+ }))
}
// createAndInjectMLDPacket creates and injects an MLD packet with the
@@ -263,11 +263,15 @@ func createAndInjectMLDPacket(e *channel.Endpoint, mldType uint8, maxRespDelay b
mld := header.MLD(icmp.MessageBody())
mld.SetMaximumResponseDelay(uint16(maxRespDelay))
mld.SetMulticastAddress(groupAddress)
- icmp.SetChecksum(header.ICMPv6Checksum(icmp, linkLocalIPv6Addr2, header.IPv6AllNodesMulticastAddress, buffer.VectorisedView{}))
+ icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: linkLocalIPv6Addr2,
+ Dst: header.IPv6AllNodesMulticastAddress,
+ }))
- e.InjectInbound(ipv6.ProtocolNumber, &stack.PacketBuffer{
+ e.InjectInbound(ipv6.ProtocolNumber, stack.NewPacketBuffer(stack.PacketBufferOptions{
Data: buf.ToVectorisedView(),
- })
+ }))
}
// TestMGPDisabled tests that the multicast group protocol is not enabled by
diff --git a/pkg/tcpip/ports/BUILD b/pkg/tcpip/ports/BUILD
index 57abec5c9..210262703 100644
--- a/pkg/tcpip/ports/BUILD
+++ b/pkg/tcpip/ports/BUILD
@@ -4,7 +4,10 @@ package(licenses = ["notice"])
go_library(
name = "ports",
- srcs = ["ports.go"],
+ srcs = [
+ "flags.go",
+ "ports.go",
+ ],
visibility = ["//visibility:public"],
deps = [
"//pkg/sync",
diff --git a/pkg/tcpip/ports/flags.go b/pkg/tcpip/ports/flags.go
new file mode 100644
index 000000000..a8d7bff25
--- /dev/null
+++ b/pkg/tcpip/ports/flags.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ports
+
+// Flags represents the type of port reservation.
+//
+// +stateify savable
+type Flags struct {
+ // MostRecent represents UDP SO_REUSEADDR.
+ MostRecent bool
+
+ // LoadBalanced indicates SO_REUSEPORT.
+ //
+ // LoadBalanced takes precedence over MostRecent.
+ LoadBalanced bool
+
+ // TupleOnly represents TCP SO_REUSEADDR.
+ TupleOnly bool
+}
+
+// Bits converts the Flags to their bitset form.
+func (f Flags) Bits() BitFlags {
+ var rf BitFlags
+ if f.MostRecent {
+ rf |= MostRecentFlag
+ }
+ if f.LoadBalanced {
+ rf |= LoadBalancedFlag
+ }
+ if f.TupleOnly {
+ rf |= TupleOnlyFlag
+ }
+ return rf
+}
+
+// Effective returns the effective behavior of a flag config.
+func (f Flags) Effective() Flags {
+ e := f
+ if e.LoadBalanced && e.MostRecent {
+ e.MostRecent = false
+ }
+ return e
+}
+
+// BitFlags is a bitset representation of Flags.
+type BitFlags uint32
+
+const (
+ // MostRecentFlag represents Flags.MostRecent.
+ MostRecentFlag BitFlags = 1 << iota
+
+ // LoadBalancedFlag represents Flags.LoadBalanced.
+ LoadBalancedFlag
+
+ // TupleOnlyFlag represents Flags.TupleOnly.
+ TupleOnlyFlag
+
+ // nextFlag is the value that the next added flag will have.
+ //
+ // It is used to calculate FlagMask below. It is also the number of
+ // valid flag states.
+ nextFlag
+
+ // FlagMask is a bit mask for BitFlags.
+ FlagMask = nextFlag - 1
+
+ // MultiBindFlagMask contains the flags that allow binding the same
+ // tuple multiple times.
+ MultiBindFlagMask = MostRecentFlag | LoadBalancedFlag
+)
+
+// ToFlags converts the bitset into a Flags struct.
+func (f BitFlags) ToFlags() Flags {
+ return Flags{
+ MostRecent: f&MostRecentFlag != 0,
+ LoadBalanced: f&LoadBalancedFlag != 0,
+ TupleOnly: f&TupleOnlyFlag != 0,
+ }
+}
+
+// FlagCounter counts how many references each flag combination has.
+type FlagCounter struct {
+ // refs stores the count for each possible flag combination (0 through
+ // FlagMask).
+ refs [nextFlag]int
+}
+
+// AddRef increases the reference count for a specific flag combination.
+func (c *FlagCounter) AddRef(flags BitFlags) {
+ c.refs[flags]++
+}
+
+// DropRef decreases the reference count for a specific flag combination.
+func (c *FlagCounter) DropRef(flags BitFlags) {
+ c.refs[flags]--
+}
+
+// TotalRefs calculates the total number of references for all flag
+// combinations.
+func (c FlagCounter) TotalRefs() int {
+ var total int
+ for _, r := range c.refs {
+ total += r
+ }
+ return total
+}
+
+// FlagRefs returns the number of references with all specified flags.
+func (c FlagCounter) FlagRefs(flags BitFlags) int {
+ var total int
+ for i, r := range c.refs {
+ if BitFlags(i)&flags == flags {
+ total += r
+ }
+ }
+ return total
+}
+
+// AllRefsHave returns whether all references have all specified flags.
+func (c FlagCounter) AllRefsHave(flags BitFlags) bool {
+ for i, r := range c.refs {
+ if BitFlags(i)&flags != flags && r > 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// SharedFlags returns the set of flags shared by all references.
+func (c FlagCounter) SharedFlags() BitFlags {
+ intersection := FlagMask
+ for i, r := range c.refs {
+ if r > 0 {
+ intersection &= BitFlags(i)
+ }
+ }
+ return intersection
+}
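A small usage sketch of the API moved into this file; the module path is assumed from the rest of the tree, and the commented outputs follow directly from the code above.

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/tcpip/ports"
)

func main() {
    f := ports.Flags{MostRecent: true, LoadBalanced: true}

    // LoadBalanced takes precedence, so the effective flags drop MostRecent.
    fmt.Printf("%+v\n", f.Effective()) // {MostRecent:false LoadBalanced:true TupleOnly:false}

    // Bits/ToFlags round-trip through the bitset form used by FlagCounter.
    fmt.Printf("%+v\n", f.Bits().ToFlags()) // {MostRecent:true LoadBalanced:true TupleOnly:false}
}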
diff --git a/pkg/tcpip/ports/ports.go b/pkg/tcpip/ports/ports.go
index 11dbdbbcf..678199371 100644
--- a/pkg/tcpip/ports/ports.go
+++ b/pkg/tcpip/ports/ports.go
@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Package ports provides PortManager that manages allocating, reserving and releasing ports.
+// Package ports provides PortManager that manages allocating, reserving and
+// releasing ports.
package ports
import (
- "math"
"math/rand"
"sync/atomic"
@@ -24,169 +24,44 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
)
-const (
- // FirstEphemeral is the first ephemeral port.
- FirstEphemeral = 16000
+const anyIPAddress tcpip.Address = ""
- // numEphemeralPorts it the mnumber of available ephemeral ports to
- // Netstack.
- numEphemeralPorts = math.MaxUint16 - FirstEphemeral + 1
+// Reservation describes a port reservation.
+type Reservation struct {
+ // Networks is a list of network protocols to which the reservation
+ // applies. Can be IPv4, IPv6, or both.
+ Networks []tcpip.NetworkProtocolNumber
- anyIPAddress tcpip.Address = ""
-)
-
-type portDescriptor struct {
- network tcpip.NetworkProtocolNumber
- transport tcpip.TransportProtocolNumber
- port uint16
-}
-
-// Flags represents the type of port reservation.
-//
-// +stateify savable
-type Flags struct {
- // MostRecent represents UDP SO_REUSEADDR.
- MostRecent bool
-
- // LoadBalanced indicates SO_REUSEPORT.
- //
- // LoadBalanced takes precidence over MostRecent.
- LoadBalanced bool
-
- // TupleOnly represents TCP SO_REUSEADDR.
- TupleOnly bool
-}
-
-// Bits converts the Flags to their bitset form.
-func (f Flags) Bits() BitFlags {
- var rf BitFlags
- if f.MostRecent {
- rf |= MostRecentFlag
- }
- if f.LoadBalanced {
- rf |= LoadBalancedFlag
- }
- if f.TupleOnly {
- rf |= TupleOnlyFlag
- }
- return rf
-}
-
-// Effective returns the effective behavior of a flag config.
-func (f Flags) Effective() Flags {
- e := f
- if e.LoadBalanced && e.MostRecent {
- e.MostRecent = false
- }
- return e
-}
-
-// PortManager manages allocating, reserving and releasing ports.
-type PortManager struct {
- mu sync.RWMutex
- allocatedPorts map[portDescriptor]bindAddresses
-
- // hint is used to pick ports ephemeral ports in a stable order for
- // a given port offset.
- //
- // hint must be accessed using the portHint/incPortHint helpers.
- // TODO(gvisor.dev/issue/940): S/R this field.
- hint uint32
-}
-
-// BitFlags is a bitset representation of Flags.
-type BitFlags uint32
-
-const (
- // MostRecentFlag represents Flags.MostRecent.
- MostRecentFlag BitFlags = 1 << iota
-
- // LoadBalancedFlag represents Flags.LoadBalanced.
- LoadBalancedFlag
-
- // TupleOnlyFlag represents Flags.TupleOnly.
- TupleOnlyFlag
-
- // nextFlag is the value that the next added flag will have.
- //
- // It is used to calculate FlagMask below. It is also the number of
- // valid flag states.
- nextFlag
-
- // FlagMask is a bit mask for BitFlags.
- FlagMask = nextFlag - 1
+ // Transport is the transport protocol to which the reservation applies.
+ Transport tcpip.TransportProtocolNumber
- // MultiBindFlagMask contains the flags that allow binding the same
- // tuple multiple times.
- MultiBindFlagMask = MostRecentFlag | LoadBalancedFlag
-)
-
-// ToFlags converts the bitset into a Flags struct.
-func (f BitFlags) ToFlags() Flags {
- return Flags{
- MostRecent: f&MostRecentFlag != 0,
- LoadBalanced: f&LoadBalancedFlag != 0,
- TupleOnly: f&TupleOnlyFlag != 0,
- }
-}
+ // Addr is the address of the local endpoint.
+ Addr tcpip.Address
-// FlagCounter counts how many references each flag combination has.
-type FlagCounter struct {
- // refs stores the count for each possible flag combination, (0 though
- // FlagMask).
- refs [nextFlag]int
-}
+ // Port is the local port number.
+ Port uint16
-// AddRef increases the reference count for a specific flag combination.
-func (c *FlagCounter) AddRef(flags BitFlags) {
- c.refs[flags]++
-}
+ // Flags describe features of the reservation.
+ Flags Flags
-// DropRef decreases the reference count for a specific flag combination.
-func (c *FlagCounter) DropRef(flags BitFlags) {
- c.refs[flags]--
-}
+ // BindToDevice is the NIC to which the reservation applies.
+ BindToDevice tcpip.NICID
-// TotalRefs calculates the total number of references for all flag
-// combinations.
-func (c FlagCounter) TotalRefs() int {
- var total int
- for _, r := range c.refs {
- total += r
- }
- return total
+ // Dest is the destination address.
+ Dest tcpip.FullAddress
}
-// FlagRefs returns the number of references with all specified flags.
-func (c FlagCounter) FlagRefs(flags BitFlags) int {
- var total int
- for i, r := range c.refs {
- if BitFlags(i)&flags == flags {
- total += r
- }
- }
- return total
-}
-
-// AllRefsHave returns if all references have all specified flags.
-func (c FlagCounter) AllRefsHave(flags BitFlags) bool {
- for i, r := range c.refs {
- if BitFlags(i)&flags != flags && r > 0 {
- return false
- }
+func (rs Reservation) dst() destination {
+ return destination{
+ rs.Dest.Addr,
+ rs.Dest.Port,
}
- return true
}
-// IntersectionRefs returns the set of flags shared by all references.
-func (c FlagCounter) IntersectionRefs() BitFlags {
- intersection := FlagMask
- for i, r := range c.refs {
- if r > 0 {
- intersection &= BitFlags(i)
- }
- }
- return intersection
+type portDescriptor struct {
+ network tcpip.NetworkProtocolNumber
+ transport tcpip.TransportProtocolNumber
+ port uint16
}
type destination struct {
@@ -194,18 +69,14 @@ type destination struct {
port uint16
}
-func makeDestination(a tcpip.FullAddress) destination {
- return destination{
- a.Addr,
- a.Port,
- }
-}
-
-// portNode is never empty. When it has no elements, it is removed from the
-// map that references it.
-type portNode map[destination]FlagCounter
+// destToCounter maps each destination to the FlagCounter that represents
+// endpoints to that destination.
+//
+// destToCounter is never empty. When it has no elements, it is removed from
+// the map that references it.
+type destToCounter map[destination]FlagCounter
-// intersectionRefs calculates the intersection of flag bit values which affect
+// intersectionFlags calculates the intersection of flag bit values which affect
// the specified destination.
//
// If no destinations are present, all flag values are returned as there are no
@@ -213,20 +84,20 @@ type portNode map[destination]FlagCounter
//
// In addition to the intersection, the number of intersecting refs is
// returned.
-func (p portNode) intersectionRefs(dst destination) (BitFlags, int) {
+func (dc destToCounter) intersectionFlags(res Reservation) (BitFlags, int) {
intersection := FlagMask
var count int
- for d, f := range p {
- if d == dst {
- intersection &= f.IntersectionRefs()
+ for dest, counter := range dc {
+ if dest == res.dst() {
+ intersection &= counter.SharedFlags()
count++
continue
}
// Wildcard destinations affect all destinations for TupleOnly.
- if d.addr == anyIPAddress || dst.addr == anyIPAddress {
+ if dest.addr == anyIPAddress || res.Dest.Addr == anyIPAddress {
// Only bitwise and the TupleOnlyFlag.
- intersection &= ((^TupleOnlyFlag) | f.IntersectionRefs())
+ intersection &= ((^TupleOnlyFlag) | counter.SharedFlags())
count++
}
}
@@ -234,27 +105,29 @@ func (p portNode) intersectionRefs(dst destination) (BitFlags, int) {
return intersection, count
}
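For orientation, a sketch of the Reservation that callers now build in place of the old positional argument lists. The concrete values are hypothetical, and the header protocol-number constants are the usual gVisor ones; the helper itself is not part of this change.

package ports

import (
    "gvisor.dev/gvisor/pkg/tcpip"
    "gvisor.dev/gvisor/pkg/tcpip/header"
)

// exampleReservation is a hypothetical in-package helper showing the
// Reservation a TCP listener bound to *:8080 with SO_REUSEPORT on any NIC
// might present to the PortManager.
func exampleReservation() Reservation {
    return Reservation{
        Networks:     []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber, header.IPv6ProtocolNumber},
        Transport:    header.TCPProtocolNumber,
        Addr:         anyIPAddress,              // wildcard local address
        Port:         8080,
        Flags:        Flags{LoadBalanced: true}, // SO_REUSEPORT
        BindToDevice: 0,                         // not restricted to one NIC
        Dest:         tcpip.FullAddress{},       // wildcard destination; dst() yields destination{"", 0}
    }
}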
-// deviceNode is never empty. When it has no elements, it is removed from the
+// deviceToDest maps NICs to destinations for which there are port reservations.
+//
+// deviceToDest is never empty. When it has no elements, it is removed from the
// map that references it.
-type deviceNode map[tcpip.NICID]portNode
+type deviceToDest map[tcpip.NICID]destToCounter
-// isAvailable checks whether binding is possible by device. If not binding to a
-// device, check against all FlagCounters. If binding to a specific device, check
-// against the unspecified device and the provided device.
+// isAvailable checks whether binding is possible by device. If not binding to
+// a device, check against all FlagCounters. If binding to a specific device,
+// check against the unspecified device and the provided device.
//
// If either of the port reuse flags is enabled on any of the nodes, all nodes
// sharing a port must share at least one reuse flag. This matches Linux's
// behavior.
-func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID, dst destination) bool {
- flagBits := flags.Bits()
- if bindToDevice == 0 {
+func (dd deviceToDest) isAvailable(res Reservation) bool {
+ flagBits := res.Flags.Bits()
+ if res.BindToDevice == 0 {
intersection := FlagMask
- for _, p := range d {
- i, c := p.intersectionRefs(dst)
- if c == 0 {
+ for _, dest := range dd {
+ flags, count := dest.intersectionFlags(res)
+ if count == 0 {
continue
}
- intersection &= i
+ intersection &= flags
if intersection&flagBits == 0 {
// Can't bind because the (addr,port) was
// previously bound without reuse.
@@ -266,18 +139,18 @@ func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID, dst desti
intersection := FlagMask
- if p, ok := d[0]; ok {
- var c int
- intersection, c = p.intersectionRefs(dst)
- if c > 0 && intersection&flagBits == 0 {
+ if dests, ok := dd[0]; ok {
+ var count int
+ intersection, count = dests.intersectionFlags(res)
+ if count > 0 && intersection&flagBits == 0 {
return false
}
}
- if p, ok := d[bindToDevice]; ok {
- i, c := p.intersectionRefs(dst)
- intersection &= i
- if c > 0 && intersection&flagBits == 0 {
+ if dests, ok := dd[res.BindToDevice]; ok {
+ flags, count := dests.intersectionFlags(res)
+ intersection &= flags
+ if count > 0 && intersection&flagBits == 0 {
return false
}
}
@@ -285,18 +158,18 @@ func (d deviceNode) isAvailable(flags Flags, bindToDevice tcpip.NICID, dst desti
return true
}
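To make the reuse rule above concrete, a standalone sketch using only the Flags/BitFlags/FlagCounter API from flags.go: once every holder of a tuple carries LoadBalanced, another LoadBalanced bind passes the intersection test, while a bind with no reuse flags fails it.

package main

import (
    "fmt"

    "gvisor.dev/gvisor/pkg/tcpip/ports"
)

func main() {
    var c ports.FlagCounter
    c.AddRef(ports.Flags{LoadBalanced: true}.Bits()) // first SO_REUSEPORT bind
    c.AddRef(ports.Flags{LoadBalanced: true}.Bits()) // second SO_REUSEPORT bind

    shared := c.SharedFlags() // flags common to every existing reference

    reusePort := ports.Flags{LoadBalanced: true}.Bits()
    plain := ports.Flags{}.Bits()

    fmt.Println(shared&reusePort != 0) // true: another SO_REUSEPORT bind may share the port
    fmt.Println(shared&plain != 0)     // false: a bind with no reuse flags is rejected
}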
-// bindAddresses is a set of IP addresses.
-type bindAddresses map[tcpip.Address]deviceNode
+// addrToDevice maps IP addresses to NICs that have port reservations.
+type addrToDevice map[tcpip.Address]deviceToDest
// isAvailable checks whether an IP address is available to bind to. If the
// address is the "any" address, check all other addresses. Otherwise, just
// check against the "any" address and the provided address.
-func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice tcpip.NICID, dst destination) bool {
- if addr == anyIPAddress {
- // If binding to the "any" address then check that there are no conflicts
- // with all addresses.
- for _, d := range b {
- if !d.isAvailable(flags, bindToDevice, dst) {
+func (ad addrToDevice) isAvailable(res Reservation) bool {
+ if res.Addr == anyIPAddress {
+ // If binding to the "any" address then check that there are no
+ // conflicts with all addresses.
+ for _, devices := range ad {
+ if !devices.isAvailable(res) {
return false
}
}
@@ -304,15 +177,15 @@ func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice
}
// Check that there is no conflict with the "any" address.
- if d, ok := b[anyIPAddress]; ok {
- if !d.isAvailable(flags, bindToDevice, dst) {
+ if devices, ok := ad[anyIPAddress]; ok {
+ if !devices.isAvailable(res) {
return false
}
}
// Check that there is no conflict with the provided address.
- if d, ok := b[addr]; ok {
- if !d.isAvailable(flags, bindToDevice, dst) {
+ if devices, ok := ad[res.Addr]; ok {
+ if !devices.isAvailable(res) {
return false
}
}
@@ -320,50 +193,93 @@ func (b bindAddresses) isAvailable(addr tcpip.Address, flags Flags, bindToDevice
return true
}
+// PortManager manages allocating, reserving and releasing ports.
+type PortManager struct {
+ // mu protects allocatedPorts.
+ // LOCK ORDERING: mu > ephemeralMu.
+ mu sync.RWMutex
+ // allocatedPorts is a nesting of maps that ultimately map Reservations
+ // to FlagCounters describing whether the Reservation is valid and can
+ // be reused.
+ allocatedPorts map[portDescriptor]addrToDevice
+
+ // ephemeralMu protects firstEphemeral and numEphemeral.
+ ephemeralMu sync.RWMutex
+ firstEphemeral uint16
+ numEphemeral uint16
+
+ // hint is used to pick ephemeral ports in a stable order for
+ // a given port offset.
+ //
+ // hint must be accessed using the portHint/incPortHint helpers.
+ // TODO(gvisor.dev/issue/940): S/R this field.
+ hint uint32
+}
+
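Concretely, the nesting resolves as allocatedPorts[portDescriptor{network, transport, port}][address][NICID][destination] down to a FlagCounter, following the addrToDevice, deviceToDest, and destToCounter maps defined above.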
// NewPortManager creates new PortManager.
func NewPortManager() *PortManager {
- return &PortManager{allocatedPorts: make(map[portDescriptor]bindAddresses)}
+ return &PortManager{
+ allocatedPorts: make(map[portDescriptor]addrToDevice),
+ // Match Linux's default ephemeral range. See:
+ // https://github.com/torvalds/linux/blob/e54937963fa249595824439dc839c948188dea83/net/ipv4/af_inet.c#L1842
+ firstEphemeral: 32768,
+ numEphemeral: 28232,
+ }
}
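With these defaults the manager allocates from ports 32768 through 32768 + 28232 - 1 = 60999, the same window as Linux's default net.ipv4.ip_local_port_range.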
+// PortTester indicates whether the passed-in port is suitable. Returning an
+// error causes the function to which the PortTester is passed to return that
+// error.
+type PortTester func(port uint16) (good bool, err tcpip.Error)
+
// PickEphemeralPort randomly chooses a starting point and iterates over all
// possible ephemeral ports, allowing the caller to decide whether a given port
// is suitable for its needs, and stopping when a port is found or an error
// occurs.
-func (s *PortManager) PickEphemeralPort(testPort func(p uint16) (bool, tcpip.Error)) (port uint16, err tcpip.Error) {
- offset := uint32(rand.Int31n(numEphemeralPorts))
- return s.pickEphemeralPort(offset, numEphemeralPorts, testPort)
+func (pm *PortManager) PickEphemeralPort(testPort PortTester) (port uint16, err tcpip.Error) {
+ pm.ephemeralMu.RLock()
+ firstEphemeral := pm.firstEphemeral
+ numEphemeral := pm.numEphemeral
+ pm.ephemeralMu.RUnlock()
+
+ offset := uint16(rand.Int31n(int32(numEphemeral)))
+ return pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort)
}
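For illustration, the PortTester contract can be exercised directly; a short sketch (not taken from this change) that accepts only even-numbered ports from the configured ephemeral range:

    pm := NewPortManager()
    port, err := pm.PickEphemeralPort(func(p uint16) (bool, tcpip.Error) {
        // Reject odd ports; the search stops at the first even port or
        // fails with *tcpip.ErrNoPortAvailable once the range is exhausted.
        return p%2 == 0, nil
    })
    if err != nil {
        // No suitable port in the ephemeral range.
    }
    _ = port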
-// portHint atomically reads and returns the s.hint value.
-func (s *PortManager) portHint() uint32 {
- return atomic.LoadUint32(&s.hint)
+// portHint atomically reads and returns the pm.hint value.
+func (pm *PortManager) portHint() uint16 {
+ return uint16(atomic.LoadUint32(&pm.hint))
}
-// incPortHint atomically increments s.hint by 1.
-func (s *PortManager) incPortHint() {
- atomic.AddUint32(&s.hint, 1)
+// incPortHint atomically increments pm.hint by 1.
+func (pm *PortManager) incPortHint() {
+ atomic.AddUint32(&pm.hint, 1)
}
-// PickEphemeralPortStable starts at the specified offset + s.portHint and
+// PickEphemeralPortStable starts at the specified offset + pm.portHint and
// iterates over all ephemeral ports, allowing the caller to decide whether a
// given port is suitable for its needs and stopping when a port is found or an
// error occurs.
-func (s *PortManager) PickEphemeralPortStable(offset uint32, testPort func(p uint16) (bool, tcpip.Error)) (port uint16, err tcpip.Error) {
- p, err := s.pickEphemeralPort(s.portHint()+offset, numEphemeralPorts, testPort)
+func (pm *PortManager) PickEphemeralPortStable(offset uint16, testPort PortTester) (port uint16, err tcpip.Error) {
+ pm.ephemeralMu.RLock()
+ firstEphemeral := pm.firstEphemeral
+ numEphemeral := pm.numEphemeral
+ pm.ephemeralMu.RUnlock()
+
+ p, err := pickEphemeralPort(pm.portHint()+offset, firstEphemeral, numEphemeral, testPort)
if err == nil {
- s.incPortHint()
+ pm.incPortHint()
}
return p, err
-
}
// pickEphemeralPort starts at the given offset from the first ephemeral port
// and iterates over the number of ports specified by count, allowing the
// caller to decide whether a given port is suitable for its needs, and stopping
// when a port is found or an error occurs.
-func (s *PortManager) pickEphemeralPort(offset, count uint32, testPort func(p uint16) (bool, tcpip.Error)) (port uint16, err tcpip.Error) {
- for i := uint32(0); i < count; i++ {
- port = uint16(FirstEphemeral + (offset+i)%count)
+func pickEphemeralPort(offset, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) {
+ for i := uint16(0); i < count; i++ {
+ port = first + (offset+i)%count
ok, err := testPort(port)
if err != nil {
return 0, err
@@ -377,144 +293,145 @@ func (s *PortManager) pickEphemeralPort(offset, count uint32, testPort func(p ui
return 0, &tcpip.ErrNoPortAvailable{}
}
-// IsPortAvailable tests if the given port is available on all given protocols.
-func (s *PortManager) IsPortAvailable(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) bool {
- s.mu.Lock()
- defer s.mu.Unlock()
- return s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice, makeDestination(dest))
-}
-
-func (s *PortManager) isPortAvailableLocked(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dst destination) bool {
- for _, network := range networks {
- desc := portDescriptor{network, transport, port}
- if addrs, ok := s.allocatedPorts[desc]; ok {
- if !addrs.isAvailable(addr, flags, bindToDevice, dst) {
- return false
- }
- }
- }
- return true
-}
-
// ReservePort marks a port/IP combination as reserved so that it cannot be
// reserved by another endpoint. If port is zero, ReservePort will search for
// an unreserved ephemeral port and reserve it, returning its value in the
// "port" return value.
//
-// An optional testPort closure can be passed in which if provided will be used
-// to test if the picked port can be used. The function should return true if
-// the port is safe to use, false otherwise.
-func (s *PortManager) ReservePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress, testPort func(port uint16) bool) (reservedPort uint16, err tcpip.Error) {
- s.mu.Lock()
- defer s.mu.Unlock()
-
- dst := makeDestination(dest)
+// An optional PortTester can be passed in; if provided, it is used to test
+// whether the picked port can be used. The function should return true if the
+// port is safe to use, false otherwise.
+func (pm *PortManager) ReservePort(res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
// If a port is specified, just try to reserve it for all network
// protocols.
- if port != 0 {
- if !s.reserveSpecificPort(networks, transport, addr, port, flags, bindToDevice, dst) {
+ if res.Port != 0 {
+ if !pm.reserveSpecificPortLocked(res) {
return 0, &tcpip.ErrPortInUse{}
}
- if testPort != nil && !testPort(port) {
- s.releasePortLocked(networks, transport, addr, port, flags.Bits(), bindToDevice, dst)
- return 0, &tcpip.ErrPortInUse{}
+ if testPort != nil {
+ ok, err := testPort(res.Port)
+ if err != nil {
+ pm.releasePortLocked(res)
+ return 0, err
+ }
+ if !ok {
+ pm.releasePortLocked(res)
+ return 0, &tcpip.ErrPortInUse{}
+ }
}
- return port, nil
+ return res.Port, nil
}
// A port wasn't specified, so try to find one.
- return s.PickEphemeralPort(func(p uint16) (bool, tcpip.Error) {
- if !s.reserveSpecificPort(networks, transport, addr, p, flags, bindToDevice, dst) {
+ return pm.PickEphemeralPort(func(p uint16) (bool, tcpip.Error) {
+ res.Port = p
+ if !pm.reserveSpecificPortLocked(res) {
return false, nil
}
- if testPort != nil && !testPort(p) {
- s.releasePortLocked(networks, transport, addr, p, flags.Bits(), bindToDevice, dst)
- return false, nil
+ if testPort != nil {
+ ok, err := testPort(p)
+ if err != nil {
+ pm.releasePortLocked(res)
+ return false, err
+ }
+ if !ok {
+ pm.releasePortLocked(res)
+ return false, nil
+ }
}
return true, nil
})
}
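For reference, callers now build a Reservation up front (the updated tests further down in this diff do the same); a hedged sketch of reserving a specific TCP/IPv4 port and releasing it later, assuming the protocol constants from the header package:

    res := Reservation{
        Networks:     []tcpip.NetworkProtocolNumber{header.IPv4ProtocolNumber},
        Transport:    header.TCPProtocolNumber,
        Addr:         tcpip.Address("\x0a\x00\x00\x01"), // 10.0.0.1
        Port:         8080,
        Flags:        Flags{},             // no SO_REUSEADDR/SO_REUSEPORT semantics
        BindToDevice: 0,                   // not bound to a device
        Dest:         tcpip.FullAddress{}, // wildcard destination
    }
    if _, err := pm.ReservePort(res, nil /* testPort */); err != nil {
        // Typically *tcpip.ErrPortInUse.
    }
    // ... when the endpoint goes away:
    pm.ReleasePort(res)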
-// reserveSpecificPort tries to reserve the given port on all given protocols.
-func (s *PortManager) reserveSpecificPort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dst destination) bool {
- if !s.isPortAvailableLocked(networks, transport, addr, port, flags, bindToDevice, dst) {
- return false
+// reserveSpecificPortLocked tries to reserve the given port on all given
+// protocols.
+func (pm *PortManager) reserveSpecificPortLocked(res Reservation) bool {
+ // Make sure the port is available.
+ for _, network := range res.Networks {
+ desc := portDescriptor{network, res.Transport, res.Port}
+ if addrs, ok := pm.allocatedPorts[desc]; ok {
+ if !addrs.isAvailable(res) {
+ return false
+ }
+ }
}
- flagBits := flags.Bits()
-
// Reserve port on all network protocols.
- for _, network := range networks {
- desc := portDescriptor{network, transport, port}
- m, ok := s.allocatedPorts[desc]
+ flagBits := res.Flags.Bits()
+ dst := res.dst()
+ for _, network := range res.Networks {
+ desc := portDescriptor{network, res.Transport, res.Port}
+ addrToDev, ok := pm.allocatedPorts[desc]
if !ok {
- m = make(bindAddresses)
- s.allocatedPorts[desc] = m
+ addrToDev = make(addrToDevice)
+ pm.allocatedPorts[desc] = addrToDev
}
- d, ok := m[addr]
+ devToDest, ok := addrToDev[res.Addr]
if !ok {
- d = make(deviceNode)
- m[addr] = d
+ devToDest = make(deviceToDest)
+ addrToDev[res.Addr] = devToDest
}
- p := d[bindToDevice]
- if p == nil {
- p = make(portNode)
+ destToCntr := devToDest[res.BindToDevice]
+ if destToCntr == nil {
+ destToCntr = make(destToCounter)
}
- n := p[dst]
- n.AddRef(flagBits)
- p[dst] = n
- d[bindToDevice] = p
+ counter := destToCntr[dst]
+ counter.AddRef(flagBits)
+ destToCntr[dst] = counter
+ devToDest[res.BindToDevice] = destToCntr
}
return true
}
// ReserveTuple adds a port reservation for the tuple on all given protocols.
-func (s *PortManager) ReserveTuple(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) bool {
- flagBits := flags.Bits()
- dst := makeDestination(dest)
+func (pm *PortManager) ReserveTuple(res Reservation) bool {
+ flagBits := res.Flags.Bits()
+ dst := res.dst()
- s.mu.Lock()
- defer s.mu.Unlock()
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
// It is easier to undo the entire reservation, so if we find that the
// tuple can't be fully added, finish and undo the whole thing.
undo := false
// Reserve port on all network protocols.
- for _, network := range networks {
- desc := portDescriptor{network, transport, port}
- m, ok := s.allocatedPorts[desc]
+ for _, network := range res.Networks {
+ desc := portDescriptor{network, res.Transport, res.Port}
+ addrToDev, ok := pm.allocatedPorts[desc]
if !ok {
- m = make(bindAddresses)
- s.allocatedPorts[desc] = m
+ addrToDev = make(addrToDevice)
+ pm.allocatedPorts[desc] = addrToDev
}
- d, ok := m[addr]
+ devToDest, ok := addrToDev[res.Addr]
if !ok {
- d = make(deviceNode)
- m[addr] = d
+ devToDest = make(deviceToDest)
+ addrToDev[res.Addr] = devToDest
}
- p := d[bindToDevice]
- if p == nil {
- p = make(portNode)
+ destToCntr := devToDest[res.BindToDevice]
+ if destToCntr == nil {
+ destToCntr = make(destToCounter)
}
- n := p[dst]
- if n.TotalRefs() != 0 && n.IntersectionRefs()&flagBits == 0 {
+ counter := destToCntr[dst]
+ if counter.TotalRefs() != 0 && counter.SharedFlags()&flagBits == 0 {
// Tuple already exists.
undo = true
}
- n.AddRef(flagBits)
- p[dst] = n
- d[bindToDevice] = p
+ counter.AddRef(flagBits)
+ destToCntr[dst] = counter
+ devToDest[res.BindToDevice] = destToCntr
}
if undo {
// releasePortLocked decrements the counts (rather than setting
// them to zero), so it will undo the incorrect incrementing
// above.
- s.releasePortLocked(networks, transport, addr, port, flagBits, bindToDevice, dst)
+ pm.releasePortLocked(res)
return false
}
@@ -523,47 +440,71 @@ func (s *PortManager) ReserveTuple(networks []tcpip.NetworkProtocolNumber, trans
// ReleasePort releases the reservation on a port/IP combination so that it can
// be reserved by other endpoints.
-func (s *PortManager) ReleasePort(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags Flags, bindToDevice tcpip.NICID, dest tcpip.FullAddress) {
- s.mu.Lock()
- defer s.mu.Unlock()
+func (pm *PortManager) ReleasePort(res Reservation) {
+ pm.mu.Lock()
+ defer pm.mu.Unlock()
- s.releasePortLocked(networks, transport, addr, port, flags.Bits(), bindToDevice, makeDestination(dest))
+ pm.releasePortLocked(res)
}
-func (s *PortManager) releasePortLocked(networks []tcpip.NetworkProtocolNumber, transport tcpip.TransportProtocolNumber, addr tcpip.Address, port uint16, flags BitFlags, bindToDevice tcpip.NICID, dst destination) {
- for _, network := range networks {
- desc := portDescriptor{network, transport, port}
- if m, ok := s.allocatedPorts[desc]; ok {
- d, ok := m[addr]
- if !ok {
- continue
- }
- p, ok := d[bindToDevice]
- if !ok {
- continue
- }
- n, ok := p[dst]
- if !ok {
- continue
- }
- n.DropRef(flags)
- if n.TotalRefs() > 0 {
- p[dst] = n
- continue
- }
- delete(p, dst)
- if len(p) > 0 {
- continue
- }
- delete(d, bindToDevice)
- if len(d) > 0 {
- continue
- }
- delete(m, addr)
- if len(m) > 0 {
- continue
- }
- delete(s.allocatedPorts, desc)
+func (pm *PortManager) releasePortLocked(res Reservation) {
+ dst := res.dst()
+ for _, network := range res.Networks {
+ desc := portDescriptor{network, res.Transport, res.Port}
+ addrToDev, ok := pm.allocatedPorts[desc]
+ if !ok {
+ continue
}
+ devToDest, ok := addrToDev[res.Addr]
+ if !ok {
+ continue
+ }
+ destToCounter, ok := devToDest[res.BindToDevice]
+ if !ok {
+ continue
+ }
+ counter, ok := destToCounter[dst]
+ if !ok {
+ continue
+ }
+ counter.DropRef(res.Flags.Bits())
+ if counter.TotalRefs() > 0 {
+ destToCounter[dst] = counter
+ continue
+ }
+ delete(destToCounter, dst)
+ if len(destToCounter) > 0 {
+ continue
+ }
+ delete(devToDest, res.BindToDevice)
+ if len(devToDest) > 0 {
+ continue
+ }
+ delete(addrToDev, res.Addr)
+ if len(addrToDev) > 0 {
+ continue
+ }
+ delete(pm.allocatedPorts, desc)
+ }
+}
+
+// PortRange returns the inclusive range of ephemeral ports used for UDP and
+// TCP over both IPv4 and IPv6.
+func (pm *PortManager) PortRange() (uint16, uint16) {
+ pm.ephemeralMu.RLock()
+ defer pm.ephemeralMu.RUnlock()
+ return pm.firstEphemeral, pm.firstEphemeral + pm.numEphemeral - 1
+}
+
+// SetPortRange sets the inclusive ephemeral port range used for UDP and TCP
+// over both IPv4 and IPv6.
+func (pm *PortManager) SetPortRange(start uint16, end uint16) tcpip.Error {
+ if start > end {
+ return &tcpip.ErrInvalidPortRange{}
}
+ pm.ephemeralMu.Lock()
+ defer pm.ephemeralMu.Unlock()
+ pm.firstEphemeral = start
+ pm.numEphemeral = end - start + 1
+ return nil
}
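The two accessors above are a short usage surface for adjusting the ephemeral range; a sketch (values are arbitrary):

    pm := NewPortManager()
    // Comparable in spirit to `sysctl -w net.ipv4.ip_local_port_range="40000 40099"`.
    if err := pm.SetPortRange(40000, 40099); err != nil {
        // Only returned when start > end (*tcpip.ErrInvalidPortRange).
    }
    first, last := pm.PortRange() // first == 40000, last == 40099
    _ = first
    _ = last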
diff --git a/pkg/tcpip/ports/ports_test.go b/pkg/tcpip/ports/ports_test.go
index e70fbb72b..0f43dc8f8 100644
--- a/pkg/tcpip/ports/ports_test.go
+++ b/pkg/tcpip/ports/ports_test.go
@@ -329,16 +329,35 @@ func TestPortReservation(t *testing.T) {
net := []tcpip.NetworkProtocolNumber{fakeNetworkNumber}
for _, test := range test.actions {
+ first, _ := pm.PortRange()
if test.release {
- pm.ReleasePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest)
+ portRes := Reservation{
+ Networks: net,
+ Transport: fakeTransNumber,
+ Addr: test.ip,
+ Port: test.port,
+ Flags: test.flags,
+ BindToDevice: test.device,
+ Dest: test.dest,
+ }
+ pm.ReleasePort(portRes)
continue
}
- gotPort, err := pm.ReservePort(net, fakeTransNumber, test.ip, test.port, test.flags, test.device, test.dest, nil /* testPort */)
+ portRes := Reservation{
+ Networks: net,
+ Transport: fakeTransNumber,
+ Addr: test.ip,
+ Port: test.port,
+ Flags: test.flags,
+ BindToDevice: test.device,
+ Dest: test.dest,
+ }
+ gotPort, err := pm.ReservePort(portRes, nil /* testPort */)
if diff := cmp.Diff(test.want, err); diff != "" {
- t.Fatalf("unexpected error from ReservePort(.., .., %s, %d, %+v, %d, %v), (-want, +got):\n%s", test.ip, test.port, test.flags, test.device, test.dest, diff)
+ t.Fatalf("unexpected error from ReservePort(%+v, _), (-want, +got):\n%s", portRes, diff)
}
- if test.port == 0 && (gotPort == 0 || gotPort < FirstEphemeral) {
- t.Fatalf("ReservePort(.., .., .., 0, ..) = %d, want port number >= %d to be picked", gotPort, FirstEphemeral)
+ if test.port == 0 && (gotPort == 0 || gotPort < first) {
+ t.Fatalf("ReservePort(%+v, _) = %d, want port number >= %d to be picked", portRes, gotPort, first)
}
}
})
@@ -346,6 +365,11 @@ func TestPortReservation(t *testing.T) {
}
func TestPickEphemeralPort(t *testing.T) {
+ const (
+ firstEphemeral = 32000
+ numEphemeralPorts = 1000
+ )
+
for _, test := range []struct {
name string
f func(port uint16) (bool, tcpip.Error)
@@ -369,17 +393,17 @@ func TestPickEphemeralPort(t *testing.T) {
{
name: "only-port-16042-available",
f: func(port uint16) (bool, tcpip.Error) {
- if port == FirstEphemeral+42 {
+ if port == firstEphemeral+42 {
return true, nil
}
return false, nil
},
- wantPort: FirstEphemeral + 42,
+ wantPort: firstEphemeral + 42,
},
{
name: "only-port-under-16000-available",
f: func(port uint16) (bool, tcpip.Error) {
- if port < FirstEphemeral {
+ if port < firstEphemeral {
return true, nil
}
return false, nil
@@ -389,6 +413,9 @@ func TestPickEphemeralPort(t *testing.T) {
} {
t.Run(test.name, func(t *testing.T) {
pm := NewPortManager()
+ if err := pm.SetPortRange(firstEphemeral, firstEphemeral+numEphemeralPorts); err != nil {
+ t.Fatalf("failed to set ephemeral port range: %s", err)
+ }
port, err := pm.PickEphemeralPort(test.f)
if diff := cmp.Diff(test.wantErr, err); diff != "" {
t.Fatalf("unexpected error from PickEphemeralPort(..), (-want, +got):\n%s", diff)
@@ -401,6 +428,11 @@ func TestPickEphemeralPort(t *testing.T) {
}
func TestPickEphemeralPortStable(t *testing.T) {
+ const (
+ firstEphemeral = 32000
+ numEphemeralPorts = 1000
+ )
+
for _, test := range []struct {
name string
f func(port uint16) (bool, tcpip.Error)
@@ -424,17 +456,17 @@ func TestPickEphemeralPortStable(t *testing.T) {
{
name: "only-port-16042-available",
f: func(port uint16) (bool, tcpip.Error) {
- if port == FirstEphemeral+42 {
+ if port == firstEphemeral+42 {
return true, nil
}
return false, nil
},
- wantPort: FirstEphemeral + 42,
+ wantPort: firstEphemeral + 42,
},
{
name: "only-port-under-16000-available",
f: func(port uint16) (bool, tcpip.Error) {
- if port < FirstEphemeral {
+ if port < firstEphemeral {
return true, nil
}
return false, nil
@@ -444,7 +476,10 @@ func TestPickEphemeralPortStable(t *testing.T) {
} {
t.Run(test.name, func(t *testing.T) {
pm := NewPortManager()
- portOffset := uint32(rand.Int31n(int32(numEphemeralPorts)))
+ if err := pm.SetPortRange(firstEphemeral, firstEphemeral+numEphemeralPorts); err != nil {
+ t.Fatalf("failed to set ephemeral port range: %s", err)
+ }
+ portOffset := uint16(rand.Int31n(int32(numEphemeralPorts)))
port, err := pm.PickEphemeralPortStable(portOffset, test.f)
if diff := cmp.Diff(test.wantErr, err); diff != "" {
t.Fatalf("unexpected error from PickEphemeralPort(..), (-want, +got):\n%s", diff)
diff --git a/pkg/tcpip/stack/conntrack.go b/pkg/tcpip/stack/conntrack.go
index cdb435644..3f083928f 100644
--- a/pkg/tcpip/stack/conntrack.go
+++ b/pkg/tcpip/stack/conntrack.go
@@ -407,12 +407,12 @@ func handlePacketOutput(pkt *PacketBuffer, conn *conn, gso *GSO, r *Route, dir d
// Calculate the TCP checksum and set it.
tcpHeader.SetChecksum(0)
- length := uint16(len(tcpHeader) + pkt.Data.Size())
+ length := uint16(len(tcpHeader) + pkt.Data().Size())
xsum := header.PseudoHeaderChecksum(header.TCPProtocolNumber, netHeader.SourceAddress(), netHeader.DestinationAddress(), length)
if gso != nil && gso.NeedsCsum {
tcpHeader.SetChecksum(xsum)
} else if r.RequiresTXTransportChecksum() {
- xsum = header.ChecksumVV(pkt.Data, xsum)
+ xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())
tcpHeader.SetChecksum(^tcpHeader.CalculateChecksum(xsum))
}
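The replacement computes the payload checksum once over the buffer's Range and folds it into the pseudo-header sum via ChecksumCombine. A minimal model of that fold (a sketch of one's-complement combination, not the header package's actual implementation):

    // combine folds two 16-bit one's-complement sums into one, carrying any
    // overflow back into the low bits.
    func combine(a, b uint16) uint16 {
        v := uint32(a) + uint32(b)
        return uint16(v + v>>16)
    }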
diff --git a/pkg/tcpip/stack/iptables_targets.go b/pkg/tcpip/stack/iptables_targets.go
index d63e9757c..0e8b90c9b 100644
--- a/pkg/tcpip/stack/iptables_targets.go
+++ b/pkg/tcpip/stack/iptables_targets.go
@@ -153,7 +153,7 @@ func (rt *RedirectTarget) Action(pkt *PacketBuffer, ct *ConnTrack, hook Hook, gs
if r.RequiresTXTransportChecksum() {
length := uint16(pkt.Size()) - uint16(len(pkt.NetworkHeader().View()))
xsum := header.PseudoHeaderChecksum(protocol, netHeader.SourceAddress(), netHeader.DestinationAddress(), length)
- xsum = header.ChecksumVV(pkt.Data, xsum)
+ xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())
udpHeader.SetChecksum(^udpHeader.CalculateChecksum(xsum))
}
}
diff --git a/pkg/tcpip/stack/ndp_test.go b/pkg/tcpip/stack/ndp_test.go
index 740bdac28..47796a6ba 100644
--- a/pkg/tcpip/stack/ndp_test.go
+++ b/pkg/tcpip/stack/ndp_test.go
@@ -99,12 +99,11 @@ func prefixSubnetAddr(offset uint8, linkAddr tcpip.LinkAddress) (tcpip.AddressWi
}
// ndpDADEvent is a set of parameters that was passed to
-// ndpDispatcher.OnDuplicateAddressDetectionStatus.
+// ndpDispatcher.OnDuplicateAddressDetectionResult.
type ndpDADEvent struct {
- nicID tcpip.NICID
- addr tcpip.Address
- resolved bool
- err tcpip.Error
+ nicID tcpip.NICID
+ addr tcpip.Address
+ res stack.DADResult
}
type ndpRouterEvent struct {
@@ -173,14 +172,13 @@ type ndpDispatcher struct {
dhcpv6ConfigurationC chan ndpDHCPv6Event
}
-// Implements ipv6.NDPDispatcher.OnDuplicateAddressDetectionStatus.
-func (n *ndpDispatcher) OnDuplicateAddressDetectionStatus(nicID tcpip.NICID, addr tcpip.Address, resolved bool, err tcpip.Error) {
+// Implements ipv6.NDPDispatcher.OnDuplicateAddressDetectionResult.
+func (n *ndpDispatcher) OnDuplicateAddressDetectionResult(nicID tcpip.NICID, addr tcpip.Address, res stack.DADResult) {
if n.dadC != nil {
n.dadC <- ndpDADEvent{
nicID,
addr,
- resolved,
- err,
+ res,
}
}
}
@@ -311,8 +309,8 @@ func (l *channelLinkWithHeaderLength) MaxHeaderLength() uint16 {
// Check e to make sure that the event is for addr on the NIC with the given
// ID and carries the specified DAD result.
-func checkDADEvent(e ndpDADEvent, nicID tcpip.NICID, addr tcpip.Address, resolved bool, err tcpip.Error) string {
- return cmp.Diff(ndpDADEvent{nicID: nicID, addr: addr, resolved: resolved, err: err}, e, cmp.AllowUnexported(e))
+func checkDADEvent(e ndpDADEvent, nicID tcpip.NICID, addr tcpip.Address, res stack.DADResult) string {
+ return cmp.Diff(ndpDADEvent{nicID: nicID, addr: addr, res: res}, e, cmp.AllowUnexported(e))
}
// TestDADDisabled tests that an address successfully resolves immediately
@@ -344,8 +342,8 @@ func TestDADDisabled(t *testing.T) {
// DAD on it.
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatal("expected DAD event")
@@ -491,8 +489,8 @@ func TestDADResolve(t *testing.T) {
case <-time.After(defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD resolution")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr1, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
}
if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, addrWithPrefix); err != nil {
@@ -573,7 +571,11 @@ func rxNDPSolicit(e *channel.Endpoint, tgt tcpip.Address) {
ns := header.NDPNeighborSolicit(pkt.MessageBody())
ns.SetTargetAddress(tgt)
snmc := header.SolicitedNodeAddr(tgt)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, header.IPv6Any, snmc, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: header.IPv6Any,
+ Dst: snmc,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -594,9 +596,10 @@ func TestDADFail(t *testing.T) {
const nicID = 1
tests := []struct {
- name string
- rxPkt func(e *channel.Endpoint, tgt tcpip.Address)
- getStat func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
+ name string
+ rxPkt func(e *channel.Endpoint, tgt tcpip.Address)
+ getStat func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter
+ expectedHolderLinkAddress tcpip.LinkAddress
}{
{
name: "RxSolicit",
@@ -604,6 +607,7 @@ func TestDADFail(t *testing.T) {
getStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
return s.NeighborSolicit
},
+ expectedHolderLinkAddress: "",
},
{
name: "RxAdvert",
@@ -619,7 +623,11 @@ func TestDADFail(t *testing.T) {
na.Options().Serialize(header.NDPOptionsSerializer{
header.NDPTargetLinkLayerAddressOption(linkAddr1),
})
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, tgt, header.IPv6AllNodesMulticastAddress, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: tgt,
+ Dst: header.IPv6AllNodesMulticastAddress,
+ }))
payloadLength := hdr.UsedLength()
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
@@ -634,6 +642,7 @@ func TestDADFail(t *testing.T) {
getStat: func(s tcpip.ICMPv6ReceivedPacketStats) *tcpip.StatCounter {
return s.NeighborAdvert
},
+ expectedHolderLinkAddress: linkAddr1,
},
}
@@ -683,8 +692,8 @@ func TestDADFail(t *testing.T) {
// something is wrong.
t.Fatal("timed out waiting for DAD failure")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, false, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr1, &stack.DADDupAddrDetected{HolderLinkAddress: test.expectedHolderLinkAddress}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
}
if err := checkGetMainNICAddress(s, nicID, header.IPv6ProtocolNumber, tcpip.AddressWithPrefix{}); err != nil {
@@ -782,8 +791,8 @@ func TestDADStop(t *testing.T) {
// time + extra 1s buffer, something is wrong.
t.Fatal("timed out waiting for DAD failure")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr1, false, &tcpip.ErrAborted{}); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr1, &stack.DADAborted{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
}
@@ -844,8 +853,8 @@ func TestSetNDPConfigurations(t *testing.T) {
expectDADEvent := func(nicID tcpip.NICID, addr tcpip.Address) {
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatalf("expected DAD event for %s", addr)
@@ -936,8 +945,8 @@ func TestSetNDPConfigurations(t *testing.T) {
// means something is wrong.
t.Fatal("timed out waiting for DAD resolution")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID1, addr1, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID1, addr1, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
}
if err := checkGetMainNICAddress(s, nicID1, header.IPv6ProtocolNumber, addrWithPrefix1); err != nil {
@@ -973,7 +982,11 @@ func raBufWithOptsAndDHCPv6(ip tcpip.Address, rl uint16, managedAddress, otherCo
}
opts := ra.Options()
opts.Serialize(optSer)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, ip, header.IPv6AllNodesMulticastAddress, buffer.VectorisedView{}))
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: ip,
+ Dst: header.IPv6AllNodesMulticastAddress,
+ }))
payloadLength := hdr.UsedLength()
iph := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
iph.Encode(&header.IPv6Fields{
@@ -1951,8 +1964,8 @@ func TestAutoGenTempAddr(t *testing.T) {
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD event")
@@ -2157,8 +2170,8 @@ func TestNoAutoGenTempAddrForLinkLocal(t *testing.T) {
}
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, llAddr1, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, llAddr1, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
case <-time.After(time.Duration(test.dupAddrTransmits)*test.retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD event")
@@ -2245,8 +2258,8 @@ func TestNoAutoGenTempAddrWithoutStableAddr(t *testing.T) {
// address to be generated.
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD event")
@@ -2711,8 +2724,8 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {
t.Helper()
clock.Advance(dupAddrTransmits * retransmitTimer)
- if diff := checkDADEvent(<-ndpDisp.dadC, nicID, addr, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(<-ndpDisp.dadC, nicID, addr, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
}
@@ -2742,8 +2755,8 @@ func TestMixedSLAACAddrConflictRegen(t *testing.T) {
rxNDPSolicit(e, addr.Address)
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, false, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatal("expected DAD event")
@@ -3841,26 +3854,26 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
}
}
- expectDADEvent := func(t *testing.T, ndpDisp *ndpDispatcher, addr tcpip.Address, resolved bool, err tcpip.Error) {
+ expectDADEvent := func(t *testing.T, ndpDisp *ndpDispatcher, addr tcpip.Address, res stack.DADResult) {
t.Helper()
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, resolved, err); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr, res); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatal("expected DAD event")
}
}
- expectDADEventAsync := func(t *testing.T, ndpDisp *ndpDispatcher, addr tcpip.Address, resolved bool) {
+ expectDADEventAsync := func(t *testing.T, ndpDisp *ndpDispatcher, addr tcpip.Address, res stack.DADResult) {
t.Helper()
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr, resolved, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr, res); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD event")
@@ -3917,7 +3930,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
// generated.
e.InjectInbound(header.IPv6ProtocolNumber, raBufWithPI(llAddr2, 0, prefix, true, true, 100, 100))
expectAutoGenAddrEvent(t, ndpDisp, stableAddrForTempAddrTest, newAddr)
- expectDADEventAsync(t, ndpDisp, stableAddrForTempAddrTest.Address, true)
+ expectDADEventAsync(t, ndpDisp, stableAddrForTempAddrTest.Address, &stack.DADSucceeded{})
// The stable address will be assigned throughout the test.
return []tcpip.AddressWithPrefix{stableAddrForTempAddrTest}
@@ -3992,7 +4005,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
// Simulate a DAD conflict.
rxNDPSolicit(e, addr.Address)
expectAutoGenAddrEvent(t, &ndpDisp, addr, invalidatedAddr)
- expectDADEvent(t, &ndpDisp, addr.Address, false, nil)
+ expectDADEvent(t, &ndpDisp, addr.Address, &stack.DADDupAddrDetected{})
// Attempting to add the address manually should not fail if the
// address's state was cleaned up when DAD failed.
@@ -4002,7 +4015,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
if err := s.RemoveAddress(nicID, addr.Address); err != nil {
t.Fatalf("RemoveAddress(%d, %s) = %s", nicID, addr.Address, err)
}
- expectDADEvent(t, &ndpDisp, addr.Address, false, &tcpip.ErrAborted{})
+ expectDADEvent(t, &ndpDisp, addr.Address, &stack.DADAborted{})
}
// Should not have any new addresses assigned to the NIC.
@@ -4015,7 +4028,7 @@ func TestAutoGenAddrInResponseToDADConflicts(t *testing.T) {
if maxRetries+1 > numFailures {
addr := addrType.addrGenFn(numFailures, tempIIDHistory[:])
expectAutoGenAddrEventAsync(t, &ndpDisp, addr, newAddr)
- expectDADEventAsync(t, &ndpDisp, addr.Address, true)
+ expectDADEventAsync(t, &ndpDisp, addr.Address, &stack.DADSucceeded{})
if mismatch := addressCheck(s.NICInfo()[nicID].ProtocolAddresses, append(stableAddrs, addr), nil); mismatch != "" {
t.Fatal(mismatch)
}
@@ -4132,8 +4145,8 @@ func TestAutoGenAddrWithEUI64IIDNoDADRetries(t *testing.T) {
expectAutoGenAddrEvent(addr, invalidatedAddr)
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, false, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatal("expected DAD event")
@@ -4231,8 +4244,8 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {
expectAutoGenAddrEvent(addr, invalidatedAddr)
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, false, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADDupAddrDetected{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
default:
t.Fatal("expected DAD event")
@@ -4243,8 +4256,8 @@ func TestAutoGenAddrContinuesLifetimesAfterRetry(t *testing.T) {
expectAutoGenAddrEvent(addr, newAddr)
select {
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.Address, true, nil); diff != "" {
- t.Errorf("dad event mismatch (-want +got):\n%s", diff)
+ if diff := checkDADEvent(e, nicID, addr.Address, &stack.DADSucceeded{}); diff != "" {
+ t.Errorf("DAD event mismatch (-want +got):\n%s", diff)
}
case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD event")
diff --git a/pkg/tcpip/stack/neighbor_cache_test.go b/pkg/tcpip/stack/neighbor_cache_test.go
index afff1b434..48bb75e2f 100644
--- a/pkg/tcpip/stack/neighbor_cache_test.go
+++ b/pkg/tcpip/stack/neighbor_cache_test.go
@@ -59,21 +59,24 @@ const (
infiniteDuration = time.Duration(math.MaxInt64)
)
-// entryDiffOpts returns the options passed to cmp.Diff to compare neighbor
-// entries. The UpdatedAtNanos field is ignored due to a lack of a
-// deterministic method to predict the time that an event will be dispatched.
-func entryDiffOpts() []cmp.Option {
+// unorderedEventsDiffOpts returns options passed to cmp.Diff to sort slices of
+// events for cases where ordering must be ignored.
+func unorderedEventsDiffOpts() []cmp.Option {
return []cmp.Option{
- cmpopts.IgnoreFields(NeighborEntry{}, "UpdatedAtNanos"),
+ cmpopts.SortSlices(func(a, b testEntryEventInfo) bool {
+ return strings.Compare(string(a.Entry.Addr), string(b.Entry.Addr)) < 0
+ }),
}
}
-// entryDiffOptsWithSort is like entryDiffOpts but also includes an option to
-// sort slices of entries for cases where ordering must be ignored.
-func entryDiffOptsWithSort() []cmp.Option {
- return append(entryDiffOpts(), cmpopts.SortSlices(func(a, b NeighborEntry) bool {
- return strings.Compare(string(a.Addr), string(b.Addr)) < 0
- }))
+// unorderedEntriesDiffOpts returns options passed to cmp.Diff to sort slices of
+// entries for cases where ordering must be ignored.
+func unorderedEntriesDiffOpts() []cmp.Option {
+ return []cmp.Option{
+ cmpopts.SortSlices(func(a, b NeighborEntry) bool {
+ return strings.Compare(string(a.Addr), string(b.Addr)) < 0
+ }),
+ }
}
func newTestNeighborResolver(nudDisp NUDDispatcher, config NUDConfigurations, clock tcpip.Clock) *testNeighborResolver {
@@ -280,48 +283,105 @@ func TestNeighborCacheSetConfig(t *testing.T) {
}
}
-func TestNeighborCacheEntry(t *testing.T) {
- c := DefaultNUDConfigurations()
- nudDisp := testNUDDispatcher{}
- clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(&nudDisp, c, clock)
+func addReachableEntryWithRemoved(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry, removed []NeighborEntry) error {
+ var gotLinkResolutionResult LinkResolutionResult
- entry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
- }
- _, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
+ _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
+ gotLinkResolutionResult = r
+ })
if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ return fmt.Errorf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
}
- clock.Advance(typicalLatency)
+ {
+ var wantEvents []testEntryEventInfo
- wantEvents := []testEntryEventInfo{
- {
+ for _, removedEntry := range removed {
+ wantEvents = append(wantEvents, testEntryEventInfo{
+ EventType: entryTestRemoved,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: removedEntry.Addr,
+ LinkAddr: removedEntry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ })
+ }
+
+ wantEvents = append(wantEvents, testEntryEventInfo{
EventType: entryTestAdded,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
+ Addr: entry.Addr,
+ LinkAddr: "",
+ State: Incomplete,
+ UpdatedAtNanos: clock.NowNanoseconds(),
},
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ })
+
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ }
+
+ clock.Advance(typicalLatency)
+
+ select {
+ case <-ch:
+ default:
+ return fmt.Errorf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
+ }
+ wantLinkResolutionResult := LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}
+ if diff := cmp.Diff(wantLinkResolutionResult, gotLinkResolutionResult); diff != "" {
+ return fmt.Errorf("got link resolution result mismatch (-want +got):\n%s", diff)
+ }
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestChanged,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
},
- },
+ }
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+
+ return nil
+}
+
+func addReachableEntry(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry) error {
+ return addReachableEntryWithRemoved(nudDisp, clock, linkRes, entry, nil /* removed */)
+}
+
+func TestNeighborCacheEntry(t *testing.T) {
+ c := DefaultNUDConfigurations()
+ nudDisp := testNUDDispatcher{}
+ clock := faketime.NewManualClock()
+ linkRes := newTestNeighborResolver(&nudDisp, c, clock)
+
+ entry, ok := linkRes.entries.entry(0)
+ if !ok {
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
+ }
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
if _, _, err := linkRes.neigh.entry(entry.Addr, "", nil); err != nil {
@@ -345,41 +405,10 @@ func TestNeighborCacheRemoveEntry(t *testing.T) {
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
-
- _, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
-
- clock.Advance(typicalLatency)
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
linkRes.neigh.removeEntry(entry.Addr)
@@ -390,14 +419,15 @@ func TestNeighborCacheRemoveEntry(t *testing.T) {
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
},
},
}
nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
nudDisp.mu.Unlock()
if diff != "" {
t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
@@ -439,18 +469,7 @@ func (c *testContext) overflowCache(opts overflowOptions) error {
// Fill the neighbor cache to capacity to verify the LRU eviction strategy is
// working properly after the entry removal.
for i := opts.startAtEntryIndex; i < c.linkRes.entries.size(); i++ {
- // Add a new entry
- entry, ok := c.linkRes.entries.entry(i)
- if !ok {
- return fmt.Errorf("c.linkRes.entries.entry(%d) not found", i)
- }
- _, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- return fmt.Errorf("got c.linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- c.clock.Advance(c.linkRes.neigh.config().RetransmitTimer)
-
- var wantEvents []testEntryEventInfo
+ var removedEntries []NeighborEntry
// When beyond the full capacity, the cache will evict an entry as per the
// LRU eviction strategy. Note that the number of static entries should not
@@ -458,63 +477,40 @@ func (c *testContext) overflowCache(opts overflowOptions) error {
if i >= neighborCacheSize+opts.startAtEntryIndex {
removedEntry, ok := c.linkRes.entries.entry(i - neighborCacheSize)
if !ok {
- return fmt.Errorf("linkRes.entries.entry(%d) not found", i-neighborCacheSize)
+ return fmt.Errorf("got linkRes.entries.entry(%d) = _, false, want = true", i-neighborCacheSize)
}
- wantEvents = append(wantEvents, testEntryEventInfo{
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: removedEntry.Addr,
- LinkAddr: removedEntry.LinkAddr,
- State: Reachable,
- },
- })
+ removedEntries = append(removedEntries, removedEntry)
}
- wantEvents = append(wantEvents, testEntryEventInfo{
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- }, testEntryEventInfo{
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- })
-
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ entry, ok := c.linkRes.entries.entry(i)
+ if !ok {
+ return fmt.Errorf("got c.linkRes.entries.entry(%d) = _, false, want = true", i)
+ }
+ if err := addReachableEntryWithRemoved(c.nudDisp, c.clock, c.linkRes, entry, removedEntries); err != nil {
+ return fmt.Errorf("addReachableEntryWithRemoved(...) = %s", err)
}
}
// Expect to find only the most recent entries. The order of entries reported
// by entries() is nondeterministic, so entries have to be sorted before
// comparison.
- wantUnsortedEntries := opts.wantStaticEntries
+ wantUnorderedEntries := opts.wantStaticEntries
for i := c.linkRes.entries.size() - neighborCacheSize; i < c.linkRes.entries.size(); i++ {
entry, ok := c.linkRes.entries.entry(i)
if !ok {
- return fmt.Errorf("c.linkRes.entries.entry(%d) not found", i)
+ return fmt.Errorf("got c.linkRes.entries.entry(%d) = _, false, want = true", i)
}
+ durationReachableNanos := int64(c.linkRes.entries.size()-i-1) * typicalLatency.Nanoseconds()
wantEntry := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: c.clock.NowNanoseconds() - durationReachableNanos,
}
- wantUnsortedEntries = append(wantUnsortedEntries, wantEntry)
+ wantUnorderedEntries = append(wantUnorderedEntries, wantEntry)
}
- if diff := cmp.Diff(wantUnsortedEntries, c.linkRes.neigh.entries(), entryDiffOptsWithSort()...); diff != "" {
+ if diff := cmp.Diff(wantUnorderedEntries, c.linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
return fmt.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
}
@@ -560,38 +556,10 @@ func TestNeighborCacheRemoveEntryThenOverflow(t *testing.T) {
// Add a dynamic entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
- _, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got c.linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- c.clock.Advance(c.linkRes.neigh.config().RetransmitTimer)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
// Remove the entry
@@ -603,14 +571,15 @@ func TestNeighborCacheRemoveEntryThenOverflow(t *testing.T) {
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
c.nudDisp.mu.events = nil
c.nudDisp.mu.Unlock()
if diff != "" {
@@ -636,33 +605,36 @@ func TestNeighborCacheDuplicateStaticEntryWithSameLinkAddress(t *testing.T) {
// Add a static entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
staticLinkAddr := entry.LinkAddr + "static"
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestAdded,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
+ },
},
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ c.nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
+ c.nudDisp.mu.events = nil
+ c.nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
- // Remove the static entry that was just added
+ // Add a duplicate static entry with the same link address.
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
- // No more events should have been dispatched.
c.nudDisp.mu.Lock()
defer c.nudDisp.mu.Unlock()
if diff := cmp.Diff([]testEntryEventInfo(nil), c.nudDisp.mu.events); diff != "" {
@@ -680,48 +652,56 @@ func TestNeighborCacheDuplicateStaticEntryWithDifferentLinkAddress(t *testing.T)
// Add a static entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
staticLinkAddr := entry.LinkAddr + "static"
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestAdded,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
+ },
},
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ c.nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
+ c.nudDisp.mu.events = nil
+ c.nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
// Add a duplicate entry with a different link address
staticLinkAddr += "duplicate"
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
+
{
wantEvents := []testEntryEventInfo{
{
EventType: entryTestChanged,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
c.nudDisp.mu.Lock()
- defer c.nudDisp.mu.Unlock()
- if diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...); diff != "" {
- t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
+ c.nudDisp.mu.events = nil
+ c.nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
}
}
}
@@ -742,45 +722,51 @@ func TestNeighborCacheRemoveStaticEntryThenOverflow(t *testing.T) {
// Add a static entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
staticLinkAddr := entry.LinkAddr + "static"
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestAdded,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
+ },
},
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ c.nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
+ c.nudDisp.mu.events = nil
+ c.nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
// Remove the static entry that was just added
c.linkRes.neigh.removeEntry(entry.Addr)
+
{
wantEvents := []testEntryEventInfo{
{
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
c.nudDisp.mu.events = nil
c.nudDisp.mu.Unlock()
if diff != "" {
@@ -812,66 +798,41 @@ func TestNeighborCacheOverwriteWithStaticEntryThenOverflow(t *testing.T) {
// Add a dynamic entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
- }
- _, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got c.linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- c.clock.Advance(typicalLatency)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
// Override the entry with a static one using the same address
staticLinkAddr := entry.LinkAddr + "static"
c.linkRes.neigh.addStaticEntry(entry.Addr, staticLinkAddr)
+
{
wantEvents := []testEntryEventInfo{
{
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
{
EventType: entryTestAdded,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
c.nudDisp.mu.events = nil
c.nudDisp.mu.Unlock()
if diff != "" {
@@ -883,9 +844,10 @@ func TestNeighborCacheOverwriteWithStaticEntryThenOverflow(t *testing.T) {
startAtEntryIndex: 1,
wantStaticEntries: []NeighborEntry{
{
- Addr: entry.Addr,
- LinkAddr: staticLinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: staticLinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
@@ -905,7 +867,7 @@ func TestNeighborCacheAddStaticEntryThenOverflow(t *testing.T) {
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
c.linkRes.neigh.addStaticEntry(entry.Addr, entry.LinkAddr)
e, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
@@ -913,40 +875,45 @@ func TestNeighborCacheAddStaticEntryThenOverflow(t *testing.T) {
t.Errorf("unexpected error from c.linkRes.neigh.entry(%s, \"\", nil): %s", entry.Addr, err)
}
want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
}
- if diff := cmp.Diff(want, e, entryDiffOpts()...); diff != "" {
+ if diff := cmp.Diff(want, e); diff != "" {
t.Errorf("c.linkRes.neigh.entry(%s, \"\", nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
}
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestAdded,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
+ },
},
- },
- }
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ c.nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
+ c.nudDisp.mu.events = nil
+ c.nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
opts := overflowOptions{
startAtEntryIndex: 1,
wantStaticEntries: []NeighborEntry{
{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Static,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Static,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
@@ -965,39 +932,10 @@ func TestNeighborCacheClear(t *testing.T) {
// Add a dynamic entry.
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
- _, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- clock.Advance(typicalLatency)
-
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
// Add a static entry.
@@ -1009,14 +947,15 @@ func TestNeighborCacheClear(t *testing.T) {
EventType: entryTestAdded,
NICID: 1,
Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Static,
+ Addr: entryTestAddr1,
+ LinkAddr: entryTestLinkAddr1,
+ State: Static,
+ UpdatedAtNanos: clock.NowNanoseconds(),
},
},
}
nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
nudDisp.mu.events = nil
nudDisp.mu.Unlock()
if diff != "" {
@@ -1028,30 +967,32 @@ func TestNeighborCacheClear(t *testing.T) {
linkRes.neigh.clear()
// Remove events dispatched from clear() have no deterministic order so they
- // need to be sorted beforehand.
- wantUnsortedEvents := []testEntryEventInfo{
+ // need to be sorted before comparison.
+ wantUnorderedEvents := []testEntryEventInfo{
{
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
},
},
{
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entryTestAddr1,
- LinkAddr: entryTestLinkAddr1,
- State: Static,
+ Addr: entryTestAddr1,
+ LinkAddr: entryTestLinkAddr1,
+ State: Static,
+ UpdatedAtNanos: clock.NowNanoseconds(),
},
},
}
nudDisp.mu.Lock()
defer nudDisp.mu.Unlock()
- if diff := cmp.Diff(wantUnsortedEvents, nudDisp.mu.events, eventDiffOptsWithSort()...); diff != "" {
+ if diff := cmp.Diff(wantUnorderedEvents, nudDisp.mu.events, unorderedEventsDiffOpts()...); diff != "" {
t.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
}
}
@@ -1071,56 +1012,30 @@ func TestNeighborCacheClearThenOverflow(t *testing.T) {
// Add a dynamic entry
entry, ok := c.linkRes.entries.entry(0)
if !ok {
- t.Fatal("c.linkRes.entries.entry(0) not found")
- }
- _, _, err := c.linkRes.neigh.entry(entry.Addr, "", nil)
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got c.linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- c.clock.Advance(typicalLatency)
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
+ t.Fatal("got c.linkRes.entries.entry(0) = _, false, want = true ")
}
- c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
- c.nudDisp.mu.events = nil
- c.nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(c.nudDisp, c.clock, c.linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
// Clear the cache.
c.linkRes.neigh.clear()
+
{
wantEvents := []testEntryEventInfo{
{
EventType: entryTestRemoved,
NICID: 1,
Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: c.clock.NowNanoseconds(),
},
},
}
c.nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, c.nudDisp.mu.events, eventDiffOpts()...)
+ diff := cmp.Diff(wantEvents, c.nudDisp.mu.events)
c.nudDisp.mu.events = nil
c.nudDisp.mu.Unlock()
if diff != "" {
@@ -1147,10 +1062,7 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {
clock := faketime.NewManualClock()
linkRes := newTestNeighborResolver(&nudDisp, config, clock)
- frequentlyUsedEntry, ok := linkRes.entries.entry(0)
- if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
- }
+ startedAt := clock.NowNanoseconds()
// The following logic is very similar to overflowCache, but
// periodically refreshes the frequently used entry.
@@ -1159,50 +1071,18 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {
for i := 0; i < neighborCacheSize; i++ {
entry, ok := linkRes.entries.entry(i)
if !ok {
- t.Fatalf("linkRes.entries.entry(%d) not found", i)
+ t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
}
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- clock.Advance(typicalLatency)
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
- }
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
- }
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
}
+ frequentlyUsedEntry, ok := linkRes.entries.entry(0)
+ if !ok {
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
+ }
+
// Keep adding more entries
for i := neighborCacheSize; i < linkRes.entries.size(); i++ {
// Periodically refresh the frequently used entry
@@ -1214,63 +1094,17 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {
entry, ok := linkRes.entries.entry(i)
if !ok {
- t.Fatalf("linkRes.entries.entry(%d) not found", i)
- }
-
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Errorf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- clock.Advance(typicalLatency)
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
}
// An entry should have been removed, as per the LRU eviction strategy
removedEntry, ok := linkRes.entries.entry(i - neighborCacheSize + 1)
if !ok {
- t.Fatalf("linkRes.entries.entry(%d) not found", i-neighborCacheSize+1)
- }
- wantEvents := []testEntryEventInfo{
- {
- EventType: entryTestRemoved,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: removedEntry.Addr,
- LinkAddr: removedEntry.LinkAddr,
- State: Reachable,
- },
- },
- {
- EventType: entryTestAdded,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- State: Incomplete,
- },
- },
- {
- EventType: entryTestChanged,
- NICID: 1,
- Entry: NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- },
- },
+ t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i-neighborCacheSize+1)
}
- nudDisp.mu.Lock()
- diff := cmp.Diff(wantEvents, nudDisp.mu.events, eventDiffOpts()...)
- nudDisp.mu.events = nil
- nudDisp.mu.Unlock()
- if diff != "" {
- t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+
+ if err := addReachableEntryWithRemoved(&nudDisp, clock, linkRes, entry, []NeighborEntry{removedEntry}); err != nil {
+ t.Fatalf("addReachableEntryWithRemoved(...) = %s", err)
}
}
@@ -1282,23 +1116,27 @@ func TestNeighborCacheKeepFrequentlyUsed(t *testing.T) {
Addr: frequentlyUsedEntry.Addr,
LinkAddr: frequentlyUsedEntry.LinkAddr,
State: Reachable,
+ // The timestamp can be inferred because the frequently used entry is the
+ // first to be created and transition to Reachable.
+ UpdatedAtNanos: startedAt + typicalLatency.Nanoseconds(),
},
}
for i := linkRes.entries.size() - neighborCacheSize + 1; i < linkRes.entries.size(); i++ {
entry, ok := linkRes.entries.entry(i)
if !ok {
- t.Fatalf("linkRes.entries.entry(%d) not found", i)
- }
- wantEntry := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ t.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
}
- wantUnsortedEntries = append(wantUnsortedEntries, wantEntry)
+ durationReachableNanos := int64(linkRes.entries.size()-i-1) * typicalLatency.Nanoseconds()
+ wantUnsortedEntries = append(wantUnsortedEntries, NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds() - durationReachableNanos,
+ })
}
- if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), entryDiffOptsWithSort()...); diff != "" {
+ if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
t.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
}
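The back-calculated UpdatedAtNanos above can be read as follows; the concrete numbers are only illustrative and assume each entry became Reachable one resolution (typicalLatency) after the previous one:

// With linkRes.entries.size() = 4 and typicalLatency = 1s:
//
//   i = 3 (newest): durationReachableNanos = (4-3-1)*1s = 0s -> want now - 0s
//   i = 2         : durationReachableNanos = (4-2-1)*1s = 1s -> want now - 1s
//   i = 1         : durationReachableNanos = (4-1-1)*1s = 2s -> want now - 2s
//
// i.e. wantUpdatedAtNanos(i) = clock.NowNanoseconds() - (size-i-1)*typicalLatency.Nanoseconds().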
@@ -1350,17 +1188,18 @@ func TestNeighborCacheConcurrent(t *testing.T) {
for i := linkRes.entries.size() - neighborCacheSize; i < linkRes.entries.size(); i++ {
entry, ok := linkRes.entries.entry(i)
if !ok {
- t.Errorf("linkRes.entries.entry(%d) not found", i)
+ t.Errorf("got linkRes.entries.entry(%d) = _, false, want = true", i)
}
- wantEntry := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- }
- wantUnsortedEntries = append(wantUnsortedEntries, wantEntry)
+ durationReachableNanos := int64(linkRes.entries.size()-i-1) * typicalLatency.Nanoseconds()
+ wantUnsortedEntries = append(wantUnsortedEntries, NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds() - durationReachableNanos,
+ })
}
- if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), entryDiffOptsWithSort()...); diff != "" {
+ if diff := cmp.Diff(wantUnsortedEntries, linkRes.neigh.entries(), unorderedEntriesDiffOpts()...); diff != "" {
t.Errorf("neighbor entries mismatch (-want, +got):\n%s", diff)
}
}
@@ -1372,44 +1211,12 @@ func TestNeighborCacheReplace(t *testing.T) {
clock := faketime.NewManualClock()
linkRes := newTestNeighborResolver(&nudDisp, config, clock)
- // Add an entry
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
- }
-
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- clock.Advance(typicalLatency)
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
-
- // Verify the entry exists
- {
- e, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Errorf("unexpected error from linkRes.neigh.entry(%s, '', _, _, nil): %s", entry.Addr, err)
- }
- if t.Failed() {
- t.FailNow()
- }
- want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
- }
- if diff := cmp.Diff(want, e, entryDiffOpts()...); diff != "" {
- t.Errorf("linkRes.neigh.entry(%s, '', _, _, nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
- }
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
// Notify of a link address change
@@ -1417,7 +1224,7 @@ func TestNeighborCacheReplace(t *testing.T) {
{
entry, ok := linkRes.entries.entry(1)
if !ok {
- t.Fatal("linkRes.entries.entry(1) not found")
+ t.Fatal("got linkRes.entries.entry(1) = _, false, want = true")
}
updatedLinkAddr = entry.LinkAddr
}
@@ -1437,29 +1244,31 @@ func TestNeighborCacheReplace(t *testing.T) {
t.Fatalf("linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
}
want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: updatedLinkAddr,
- State: Delay,
+ Addr: entry.Addr,
+ LinkAddr: updatedLinkAddr,
+ State: Delay,
+ UpdatedAtNanos: clock.NowNanoseconds(),
}
- if diff := cmp.Diff(want, e, entryDiffOpts()...); diff != "" {
+ if diff := cmp.Diff(want, e); diff != "" {
t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
}
- clock.Advance(config.DelayFirstProbeTime + typicalLatency)
}
+ clock.Advance(config.DelayFirstProbeTime + typicalLatency)
+
// Verify that the neighbor is now reachable.
{
e, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- clock.Advance(typicalLatency)
if err != nil {
t.Errorf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
}
want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: updatedLinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: updatedLinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
}
- if diff := cmp.Diff(want, e, entryDiffOpts()...); diff != "" {
+ if diff := cmp.Diff(want, e); diff != "" {
t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
}
}
@@ -1479,25 +1288,12 @@ func TestNeighborCacheResolutionFailed(t *testing.T) {
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
// First, sanity check that resolution is working
- {
- _, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
- if diff := cmp.Diff(LinkResolutionResult{LinkAddress: entry.LinkAddr, Err: nil}, r); diff != "" {
- t.Fatalf("got link resolution result mismatch (-want +got):\n%s", diff)
- }
- })
- if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
- }
- clock.Advance(typicalLatency)
- select {
- case <-ch:
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
- }
+ if err := addReachableEntry(&nudDisp, clock, linkRes, entry); err != nil {
+ t.Fatalf("addReachableEntry(...) = %s", err)
}
got, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
@@ -1505,11 +1301,12 @@ func TestNeighborCacheResolutionFailed(t *testing.T) {
t.Fatalf("unexpected error from linkRes.neigh.entry(%s, '', nil): %s", entry.Addr, err)
}
want := NeighborEntry{
- Addr: entry.Addr,
- LinkAddr: entry.LinkAddr,
- State: Reachable,
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
}
- if diff := cmp.Diff(want, got, entryDiffOpts()...); diff != "" {
+ if diff := cmp.Diff(want, got); diff != "" {
t.Errorf("linkRes.neigh.entry(%s, '', nil) mismatch (-want, +got):\n%s", entry.Addr, diff)
}
@@ -1524,14 +1321,14 @@ func TestNeighborCacheResolutionFailed(t *testing.T) {
}
})
if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
}
waitFor := config.DelayFirstProbeTime + typicalLatency*time.Duration(config.MaxMulticastProbes)
clock.Advance(waitFor)
select {
case <-ch:
default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
}
}
@@ -1555,7 +1352,7 @@ func TestNeighborCacheResolutionTimeout(t *testing.T) {
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
_, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
@@ -1564,7 +1361,7 @@ func TestNeighborCacheResolutionTimeout(t *testing.T) {
}
})
if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
}
waitFor := config.RetransmitTimer * time.Duration(config.MaxMulticastProbes)
clock.Advance(waitFor)
@@ -1572,7 +1369,7 @@ func TestNeighborCacheResolutionTimeout(t *testing.T) {
select {
case <-ch:
default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
}
}
@@ -1580,14 +1377,15 @@ func TestNeighborCacheResolutionTimeout(t *testing.T) {
// failing to perform address resolution.
func TestNeighborCacheRetryResolution(t *testing.T) {
config := DefaultNUDConfigurations()
+ nudDisp := testNUDDispatcher{}
clock := faketime.NewManualClock()
- linkRes := newTestNeighborResolver(nil, config, clock)
+ linkRes := newTestNeighborResolver(&nudDisp, config, clock)
// Simulate a faulty link.
linkRes.dropReplies = true
entry, ok := linkRes.entries.entry(0)
if !ok {
- t.Fatal("linkRes.entries.entry(0) not found")
+ t.Fatal("got linkRes.entries.entry(0) = _, false, want = true ")
}
// Perform address resolution with a faulty link, which will fail.
@@ -1598,27 +1396,75 @@ func TestNeighborCacheRetryResolution(t *testing.T) {
}
})
if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- t.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ t.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ }
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestAdded,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: "",
+ State: Incomplete,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ },
+ }
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
}
+
waitFor := config.RetransmitTimer * time.Duration(config.MaxMulticastProbes)
clock.Advance(waitFor)
select {
case <-ch:
default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
}
- }
- wantEntries := []NeighborEntry{
{
- Addr: entry.Addr,
- LinkAddr: "",
- State: Unreachable,
- },
- }
- if diff := cmp.Diff(linkRes.neigh.entries(), wantEntries, entryDiffOptsWithSort()...); diff != "" {
- t.Fatalf("neighbor entries mismatch (-got, +want):\n%s", diff)
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestChanged,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: "",
+ State: Unreachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ },
+ }
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ }
+
+ {
+ wantEntries := []NeighborEntry{
+ {
+ Addr: entry.Addr,
+ LinkAddr: "",
+ State: Unreachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ }
+ if diff := cmp.Diff(linkRes.neigh.entries(), wantEntries, unorderedEntriesDiffOpts()...); diff != "" {
+ t.Fatalf("neighbor entries mismatch (-got, +want):\n%s", diff)
+ }
+ }
}
// Retry address resolution with a working link.
@@ -1635,28 +1481,74 @@ func TestNeighborCacheRetryResolution(t *testing.T) {
if incompleteEntry.State != Incomplete {
t.Fatalf("got entry.State = %s, want = %s", incompleteEntry.State, Incomplete)
}
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestChanged,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: "",
+ State: Incomplete,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ },
+ }
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
+ }
+ }
+
clock.Advance(typicalLatency)
select {
case <-ch:
- if !ok {
- t.Fatal("expected successful address resolution")
+ default:
+ t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
+ }
+
+ {
+ wantEvents := []testEntryEventInfo{
+ {
+ EventType: entryTestChanged,
+ NICID: 1,
+ Entry: NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
+ },
+ },
}
- reachableEntry, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
- if err != nil {
- t.Fatalf("linkRes.neigh.entry(%s, '', _, _, nil): %v", entry.Addr, err)
+ nudDisp.mu.Lock()
+ diff := cmp.Diff(wantEvents, nudDisp.mu.events)
+ nudDisp.mu.events = nil
+ nudDisp.mu.Unlock()
+ if diff != "" {
+ t.Fatalf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
}
- if reachableEntry.Addr != entry.Addr {
- t.Fatalf("got entry.Addr = %s, want = %s", reachableEntry.Addr, entry.Addr)
+ }
+
+ {
+ gotEntry, _, err := linkRes.neigh.entry(entry.Addr, "", nil)
+ if err != nil {
+ t.Fatalf("linkRes.neigh.entry(%s, '', _): %s", entry.Addr, err)
}
- if reachableEntry.LinkAddr != entry.LinkAddr {
- t.Fatalf("got entry.LinkAddr = %s, want = %s", reachableEntry.LinkAddr, entry.LinkAddr)
+
+ wantEntry := NeighborEntry{
+ Addr: entry.Addr,
+ LinkAddr: entry.LinkAddr,
+ State: Reachable,
+ UpdatedAtNanos: clock.NowNanoseconds(),
}
- if reachableEntry.State != Reachable {
- t.Fatalf("got entry.State = %s, want = %s", reachableEntry.State.String(), Reachable.String())
+ if diff := cmp.Diff(gotEntry, wantEntry); diff != "" {
+ t.Fatalf("neighbor entry mismatch (-got, +want):\n%s", diff)
}
- default:
- t.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
}
}
}
@@ -1674,7 +1566,7 @@ func BenchmarkCacheClear(b *testing.B) {
for i := 0; i < cacheSize; i++ {
entry, ok := linkRes.entries.entry(i)
if !ok {
- b.Fatalf("linkRes.entries.entry(%d) not found", i)
+ b.Fatalf("got linkRes.entries.entry(%d) = _, false, want = true", i)
}
_, ch, err := linkRes.neigh.entry(entry.Addr, "", func(r LinkResolutionResult) {
@@ -1683,13 +1575,13 @@ func BenchmarkCacheClear(b *testing.B) {
}
})
if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
- b.Fatalf("got linkRes.neigh.entry(%s, '', _, _, nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
+ b.Fatalf("got linkRes.neigh.entry(%s, '', _) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
}
select {
case <-ch:
default:
- b.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _, _, nil)", entry.Addr)
+ b.Fatalf("expected notification from done channel returned by linkRes.neigh.entry(%s, '', _)", entry.Addr)
}
}
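The test hunks above collapse the repeated resolve-then-check-events boilerplate into addReachableEntry and addReachableEntryWithRemoved, whose bodies are not part of this diff. The following is a minimal sketch of what such helpers could look like, reconstructed only from the removed inline code and the call sites; the signatures, event ordering, and UpdatedAtNanos arithmetic are assumptions, not the committed implementation, and it relies on package-level test identifiers (testNUDDispatcher, testNeighborResolver, typicalLatency) defined elsewhere in the file.

func addReachableEntry(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry) error {
	return addReachableEntryWithRemoved(nudDisp, clock, linkRes, entry, nil)
}

func addReachableEntryWithRemoved(nudDisp *testNUDDispatcher, clock *faketime.ManualClock, linkRes *testNeighborResolver, entry NeighborEntry, removed []NeighborEntry) error {
	// Kick off resolution; the first lookup is expected to return ErrWouldBlock.
	if _, _, err := linkRes.neigh.entry(entry.Addr, "", nil); err != nil {
		if _, ok := err.(*tcpip.ErrWouldBlock); !ok {
			return fmt.Errorf("got linkRes.neigh.entry(%s, '', nil) = %v, want = %s", entry.Addr, err, &tcpip.ErrWouldBlock{})
		}
	}
	clock.Advance(typicalLatency)

	// Evicted entries are reported before the new entry's Added/Changed events.
	var wantEvents []testEntryEventInfo
	for _, removedEntry := range removed {
		wantEvents = append(wantEvents, testEntryEventInfo{
			EventType: entryTestRemoved,
			NICID:     1,
			Entry: NeighborEntry{
				Addr:           removedEntry.Addr,
				LinkAddr:       removedEntry.LinkAddr,
				State:          Reachable,
				UpdatedAtNanos: removedEntry.UpdatedAtNanos,
			},
		})
	}
	wantEvents = append(wantEvents,
		testEntryEventInfo{
			EventType: entryTestAdded,
			NICID:     1,
			Entry: NeighborEntry{
				Addr:           entry.Addr,
				State:          Incomplete,
				UpdatedAtNanos: clock.NowNanoseconds() - typicalLatency.Nanoseconds(),
			},
		},
		testEntryEventInfo{
			EventType: entryTestChanged,
			NICID:     1,
			Entry: NeighborEntry{
				Addr:           entry.Addr,
				LinkAddr:       entry.LinkAddr,
				State:          Reachable,
				UpdatedAtNanos: clock.NowNanoseconds(),
			},
		},
	)

	nudDisp.mu.Lock()
	defer nudDisp.mu.Unlock()
	diff := cmp.Diff(wantEvents, nudDisp.mu.events)
	nudDisp.mu.events = nil
	if diff != "" {
		return fmt.Errorf("nud dispatcher events mismatch (-want, +got):\n%s", diff)
	}
	return nil
}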
diff --git a/pkg/tcpip/stack/neighbor_entry_test.go b/pkg/tcpip/stack/neighbor_entry_test.go
index baae7dfe1..bb2b2d705 100644
--- a/pkg/tcpip/stack/neighbor_entry_test.go
+++ b/pkg/tcpip/stack/neighbor_entry_test.go
@@ -18,13 +18,11 @@ import (
"fmt"
"math"
"math/rand"
- "strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
- "github.com/google/go-cmp/cmp/cmpopts"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/faketime"
"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -52,23 +50,6 @@ func runImmediatelyScheduledJobs(clock *faketime.ManualClock) {
clock.Advance(immediateDuration)
}
-// eventDiffOpts are the options passed to cmp.Diff to compare entry events.
-// The UpdatedAtNanos field is ignored due to a lack of a deterministic method
-// to predict the time that an event will be dispatched.
-func eventDiffOpts() []cmp.Option {
- return []cmp.Option{
- cmpopts.IgnoreFields(NeighborEntry{}, "UpdatedAtNanos"),
- }
-}
-
-// eventDiffOptsWithSort is like eventDiffOpts but also includes an option to
-// sort slices of events for cases where ordering must be ignored.
-func eventDiffOptsWithSort() []cmp.Option {
- return append(eventDiffOpts(), cmpopts.SortSlices(func(a, b testEntryEventInfo) bool {
- return strings.Compare(string(a.Entry.Addr), string(b.Entry.Addr)) < 0
- }))
-}
-
// The following unit tests exercise every state transition and verify its
// behavior with RFC 4681 and RFC 7048.
//
diff --git a/pkg/tcpip/stack/nic.go b/pkg/tcpip/stack/nic.go
index f9323d545..62f7c880e 100644
--- a/pkg/tcpip/stack/nic.go
+++ b/pkg/tcpip/stack/nic.go
@@ -725,12 +725,12 @@ func (n *nic) DeliverNetworkPacket(remote, local tcpip.LinkAddress, protocol tcp
n.mu.RUnlock()
n.stats.DisabledRx.Packets.Increment()
- n.stats.DisabledRx.Bytes.IncrementBy(uint64(pkt.Data.Size()))
+ n.stats.DisabledRx.Bytes.IncrementBy(uint64(pkt.Data().Size()))
return
}
n.stats.Rx.Packets.Increment()
- n.stats.Rx.Bytes.IncrementBy(uint64(pkt.Data.Size()))
+ n.stats.Rx.Bytes.IncrementBy(uint64(pkt.Data().Size()))
networkEndpoint, ok := n.networkEndpoints[protocol]
if !ok {
@@ -881,7 +881,7 @@ func (n *nic) DeliverTransportError(local, remote tcpip.Address, net tcpip.Netwo
// ICMPv4 only guarantees that 8 bytes of the transport protocol will
// be present in the payload. We know that the ports are within the
// first 8 bytes for all known transport protocols.
- transHeader, ok := pkt.Data.PullUp(8)
+ transHeader, ok := pkt.Data().PullUp(8)
if !ok {
return
}
diff --git a/pkg/tcpip/stack/packet_buffer.go b/pkg/tcpip/stack/packet_buffer.go
index 4f013b212..8f288675d 100644
--- a/pkg/tcpip/stack/packet_buffer.go
+++ b/pkg/tcpip/stack/packet_buffer.go
@@ -59,7 +59,7 @@ type PacketBuffer struct {
// PacketBuffers.
PacketBufferEntry
- // Data holds the payload of the packet.
+ // data holds the payload of the packet.
//
// For inbound packets, Data is initially the whole packet. Then gets moved to
// headers via PacketHeader.Consume, when the packet is being parsed.
@@ -69,7 +69,7 @@ type PacketBuffer struct {
//
// The bytes backing Data are immutable, a.k.a. users shouldn't write to its
// backing storage.
- Data buffer.VectorisedView
+ data buffer.VectorisedView
// headers stores metadata about each header.
headers [numHeaderType]headerInfo
@@ -127,7 +127,7 @@ type PacketBuffer struct {
// NewPacketBuffer creates a new PacketBuffer with opts.
func NewPacketBuffer(opts PacketBufferOptions) *PacketBuffer {
pk := &PacketBuffer{
- Data: opts.Data,
+ data: opts.Data,
}
if opts.ReserveHeaderBytes != 0 {
pk.header = buffer.NewPrependable(opts.ReserveHeaderBytes)
@@ -184,13 +184,18 @@ func (pk *PacketBuffer) HeaderSize() int {
// Size returns the size of packet in bytes.
func (pk *PacketBuffer) Size() int {
- return pk.HeaderSize() + pk.Data.Size()
+ return pk.HeaderSize() + pk.data.Size()
}
// MemSize returns the estimation size of the pk in memory, including backing
// buffer data.
func (pk *PacketBuffer) MemSize() int {
- return pk.HeaderSize() + pk.Data.MemSize() + packetBufferStructSize
+ return pk.HeaderSize() + pk.data.MemSize() + packetBufferStructSize
+}
+
+// Data returns the handle to data portion of pk.
+func (pk *PacketBuffer) Data() PacketData {
+ return PacketData{pk: pk}
}
// Views returns the underlying storage of the whole packet.
@@ -204,7 +209,7 @@ func (pk *PacketBuffer) Views() []buffer.View {
}
}
- dataViews := pk.Data.Views()
+ dataViews := pk.data.Views()
var vs []buffer.View
if useHeader {
@@ -242,11 +247,11 @@ func (pk *PacketBuffer) consume(typ headerType, size int) (v buffer.View, consum
if h.buf != nil {
panic(fmt.Sprintf("consume must not be called twice: type %s", typ))
}
- v, ok := pk.Data.PullUp(size)
+ v, ok := pk.data.PullUp(size)
if !ok {
return
}
- pk.Data.TrimFront(size)
+ pk.data.TrimFront(size)
h.buf = v
return h.buf, true
}
@@ -258,7 +263,7 @@ func (pk *PacketBuffer) consume(typ headerType, size int) (v buffer.View, consum
func (pk *PacketBuffer) Clone() *PacketBuffer {
return &PacketBuffer{
PacketBufferEntry: pk.PacketBufferEntry,
- Data: pk.Data.Clone(nil),
+ data: pk.data.Clone(nil),
headers: pk.headers,
header: pk.header,
Hash: pk.Hash,
@@ -339,13 +344,234 @@ func (h PacketHeader) Consume(size int) (v buffer.View, consumed bool) {
return h.pk.consume(h.typ, size)
}
+// PacketData represents the data portion of a PacketBuffer.
+type PacketData struct {
+ pk *PacketBuffer
+}
+
+// PullUp returns a contiguous view of size bytes from the beginning of d.
+// Callers should not write to or keep the view for later use.
+func (d PacketData) PullUp(size int) (buffer.View, bool) {
+ return d.pk.data.PullUp(size)
+}
+
+// TrimFront removes count from the beginning of d. It panics if count >
+// d.Size().
+func (d PacketData) TrimFront(count int) {
+ d.pk.data.TrimFront(count)
+}
+
+// CapLength reduces d to at most length bytes.
+func (d PacketData) CapLength(length int) {
+ d.pk.data.CapLength(length)
+}
+
+// Views returns the underlying storage of d in a slice of Views. Caller should
+// not modify the returned slice.
+func (d PacketData) Views() []buffer.View {
+ return d.pk.data.Views()
+}
+
+// AppendView appends v to d, taking ownership of v.
+func (d PacketData) AppendView(v buffer.View) {
+ d.pk.data.AppendView(v)
+}
+
+// ReadFromData moves at most count bytes from the beginning of srcData to the
+// end of d and returns the number of bytes moved.
+func (d PacketData) ReadFromData(srcData PacketData, count int) int {
+ return srcData.pk.data.ReadToVV(&d.pk.data, count)
+}
+
+// ReadFromVV moves at most count bytes from the beginning of srcVV to the end
+// of d and returns the number of bytes moved.
+func (d PacketData) ReadFromVV(srcVV *buffer.VectorisedView, count int) int {
+ return srcVV.ReadToVV(&d.pk.data, count)
+}
+
+// Size returns the number of bytes in the data payload of the packet.
+func (d PacketData) Size() int {
+ return d.pk.data.Size()
+}
+
+// AsRange returns a Range representing the current data payload of the packet.
+func (d PacketData) AsRange() Range {
+ return Range{
+ pk: d.pk,
+ offset: d.pk.HeaderSize(),
+ length: d.Size(),
+ }
+}
+
+// ExtractVV returns a VectorisedView of d. This method destructs the
+// underlying packet, so the packet cannot be used again.
+//
+// This method exists for compatibility between PacketBuffer and VectorisedView.
+// It may be removed later and should be used with care.
+func (d PacketData) ExtractVV() buffer.VectorisedView {
+ return d.pk.data
+}
+
+// Replace replaces the data portion of the packet with vv, taking ownership
+// of vv.
+//
+// This method exists for compatibility between PacketBuffer and VectorisedView.
+// It may be removed later and should be used with care.
+func (d PacketData) Replace(vv buffer.VectorisedView) {
+ d.pk.data = vv
+}
+
+// Range represents a contiguous subportion of a PacketBuffer.
+type Range struct {
+ pk *PacketBuffer
+ offset int
+ length int
+}
+
+// Size returns the number of bytes in r.
+func (r Range) Size() int {
+ return r.length
+}
+
+// SubRange returns a new Range starting off bytes into r. It returns an empty
+// range if off is out of bounds.
+func (r Range) SubRange(off int) Range {
+ if off > r.length {
+ return Range{pk: r.pk}
+ }
+ return Range{
+ pk: r.pk,
+ offset: r.offset + off,
+ length: r.length - off,
+ }
+}
+
+// Capped returns a new Range with the same starting point as r and its length
+// capped at max.
+func (r Range) Capped(max int) Range {
+ if r.length <= max {
+ return r
+ }
+ return Range{
+ pk: r.pk,
+ offset: r.offset,
+ length: max,
+ }
+}
+
+// AsView returns the backing storage of r if possible. It will allocate a new
+// View if r spans multiple pieces internally. Caller should not write to the
+// returned View in any way.
+func (r Range) AsView() buffer.View {
+ var allocated bool
+ var v buffer.View
+ r.iterate(func(b []byte) {
+ if v == nil {
+ // v has not been assigned yet, so the first view can be returned directly.
+ v = b
+ } else {
+ // v has been assigned. This range spans more than one view, so a new
+ // view needs to be allocated.
+ if !allocated {
+ allocated = true
+ all := make([]byte, 0, r.length)
+ all = append(all, v...)
+ v = all
+ }
+ v = append(v, b...)
+ }
+ })
+ return v
+}
+
+// ToOwnedView returns an owned copy of the data in r.
+func (r Range) ToOwnedView() buffer.View {
+ if r.length == 0 {
+ return nil
+ }
+ all := make([]byte, 0, r.length)
+ r.iterate(func(b []byte) {
+ all = append(all, b...)
+ })
+ return all
+}
+
+// Checksum calculates the RFC 1071 checksum for the underlying bytes of r.
+func (r Range) Checksum() uint16 {
+ var c header.Checksumer
+ r.iterate(c.Add)
+ return c.Checksum()
+}
+
+// iterate calls fn for each piece in r. fn is always called with a non-empty
+// slice.
+func (r Range) iterate(fn func([]byte)) {
+ w := window{
+ offset: r.offset,
+ length: r.length,
+ }
+ // Header portion.
+ for i := range r.pk.headers {
+ if b := w.process(r.pk.headers[i].buf); len(b) > 0 {
+ fn(b)
+ }
+ if w.isDone() {
+ break
+ }
+ }
+ // Data portion.
+ if !w.isDone() {
+ for _, v := range r.pk.data.Views() {
+ if b := w.process(v); len(b) > 0 {
+ fn(b)
+ }
+ if w.isDone() {
+ break
+ }
+ }
+ }
+}
+
+// window represents a contiguous region of a byte stream. Callers feed bytes
+// in via process() and obtain the subslices that fall inside the window.
+type window struct {
+ offset int
+ length int
+}
+
+// isDone returns true if the window has passed and further process() calls will
+// always return an empty slice. This can be used to end processing early.
+func (w *window) isDone() bool {
+ return w.length == 0
+}
+
+// process feeds b in and returns the subslice of b that falls inside the
+// window. The returned slice aliases b; w does not retain b after the method
+// returns. It may return an empty slice if nothing in b is inside the window.
+func (w *window) process(b []byte) (inWindow []byte) {
+ if w.offset >= len(b) {
+ w.offset -= len(b)
+ return nil
+ }
+ if w.offset > 0 {
+ b = b[w.offset:]
+ w.offset = 0
+ }
+ if w.length < len(b) {
+ b = b[:w.length]
+ }
+ w.length -= len(b)
+ return b
+}
+
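How the window arithmetic plays out on a concrete input (illustrative values only, not part of the change):

// With window{offset: 3, length: 4} and pieces ["ab", "cdef", "gh"]:
//
//   process("ab")   -> nil   (offset 3 >= len 2, offset becomes 1)
//   process("cdef") -> "def" (skip 1 leading byte; length drops from 4 to 1)
//   process("gh")   -> "g"   (capped to the remaining length; isDone() is now true)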
// PayloadSince returns packet payload starting from and including a particular
// header.
//
// The returned View is owned by the caller - its backing buffer is separate
// from the packet header's underlying packet buffer.
func PayloadSince(h PacketHeader) buffer.View {
- size := h.pk.Data.Size()
+ size := h.pk.data.Size()
for _, hinfo := range h.pk.headers[h.typ:] {
size += len(hinfo.buf)
}
@@ -356,7 +582,7 @@ func PayloadSince(h PacketHeader) buffer.View {
v = append(v, hinfo.buf...)
}
- for _, view := range h.pk.Data.Views() {
+ for _, view := range h.pk.data.Views() {
v = append(v, view...)
}
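For callers outside this diff, the migration from the removed exported Data field is mechanical: pkt.Data becomes pkt.Data(), and the Range helpers replace ad-hoc view concatenation. A minimal sketch follows; the payload contents are invented, and it assumes the stack and buffer packages shown above:

pkt := NewPacketBuffer(PacketBufferOptions{
	Data: buffer.View("hello world").ToVectorisedView(),
})

// Peek at the first bytes without copying; the returned view must not be retained.
if v, ok := pkt.Data().PullUp(5); ok {
	_ = v // "hello"
}

// Old: pkt.Data.TrimFront(6); new: the accessor mutates the same packet.
pkt.Data().TrimFront(6)

// Range gives bounded access to the remaining payload without exposing the
// underlying VectorisedView.
r := pkt.Data().AsRange().Capped(5)
payload := r.ToOwnedView() // owned copy of "world"
cksum := r.Checksum()      // RFC 1071 checksum over the same bytes
_, _ = payload, cksum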
diff --git a/pkg/tcpip/stack/packet_buffer_test.go b/pkg/tcpip/stack/packet_buffer_test.go
index c6fa8da5f..6728370c3 100644
--- a/pkg/tcpip/stack/packet_buffer_test.go
+++ b/pkg/tcpip/stack/packet_buffer_test.go
@@ -15,9 +15,11 @@ package stack
import (
"bytes"
+ "fmt"
"testing"
"gvisor.dev/gvisor/pkg/tcpip/buffer"
+ "gvisor.dev/gvisor/pkg/tcpip/header"
)
func TestPacketHeaderPush(t *testing.T) {
@@ -110,7 +112,7 @@ func TestPacketHeaderPush(t *testing.T) {
if got, want := pk.Size(), allHdrSize+len(test.data); got != want {
t.Errorf("After pk.Size() = %d, want %d", got, want)
}
- checkViewEqual(t, "After pk.Data.Views()", concatViews(pk.Data.Views()...), test.data)
+ checkData(t, pk, test.data)
checkViewEqual(t, "After pk.Views()", concatViews(pk.Views()...),
concatViews(test.link, test.network, test.transport, test.data))
// Check the after values for each header.
@@ -204,7 +206,7 @@ func TestPacketHeaderConsume(t *testing.T) {
transport = test.data[test.link+test.network:][:test.transport]
payload = test.data[allHdrSize:]
)
- checkViewEqual(t, "After pk.Data.Views()", concatViews(pk.Data.Views()...), payload)
+ checkData(t, pk, payload)
checkViewEqual(t, "After pk.Views()", concatViews(pk.Views()...), test.data)
// Check the after values for each header.
checkPacketHeader(t, "After pk.LinkHeader", pk.LinkHeader(), link)
@@ -340,6 +342,158 @@ func TestPacketHeaderConsumeThenPushPanics(t *testing.T) {
}
}
+func TestPacketBufferData(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ makePkt func(*testing.T) *PacketBuffer
+ data string
+ }{
+ {
+ name: "inbound packet",
+ makePkt: func(*testing.T) *PacketBuffer {
+ pkt := NewPacketBuffer(PacketBufferOptions{
+ Data: vv("aabbbbccccccDATA"),
+ })
+ pkt.LinkHeader().Consume(2)
+ pkt.NetworkHeader().Consume(4)
+ pkt.TransportHeader().Consume(6)
+ return pkt
+ },
+ data: "DATA",
+ },
+ {
+ name: "outbound packet",
+ makePkt: func(*testing.T) *PacketBuffer {
+ pkt := NewPacketBuffer(PacketBufferOptions{
+ ReserveHeaderBytes: 12,
+ Data: vv("DATA"),
+ })
+ copy(pkt.TransportHeader().Push(6), []byte("cccccc"))
+ copy(pkt.NetworkHeader().Push(4), []byte("bbbb"))
+ copy(pkt.LinkHeader().Push(2), []byte("aa"))
+ return pkt
+ },
+ data: "DATA",
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ // PullUp
+ for _, n := range []int{1, len(tc.data)} {
+ t.Run(fmt.Sprintf("PullUp%d", n), func(t *testing.T) {
+ pkt := tc.makePkt(t)
+ v, ok := pkt.Data().PullUp(n)
+ wantV := []byte(tc.data)[:n]
+ if !ok || !bytes.Equal(v, wantV) {
+ t.Errorf("pkt.Data().PullUp(%d) = %q, %t; want %q, true", n, v, ok, wantV)
+ }
+ })
+ }
+ t.Run("PullUpOutOfBounds", func(t *testing.T) {
+ n := len(tc.data) + 1
+ pkt := tc.makePkt(t)
+ v, ok := pkt.Data().PullUp(n)
+ if ok || v != nil {
+ t.Errorf("pkt.Data().PullUp(%d) = %q, %t; want nil, false", n, v, ok)
+ }
+ })
+
+ // TrimFront
+ for _, n := range []int{1, len(tc.data)} {
+ t.Run(fmt.Sprintf("TrimFront%d", n), func(t *testing.T) {
+ pkt := tc.makePkt(t)
+ pkt.Data().TrimFront(n)
+
+ checkData(t, pkt, []byte(tc.data)[n:])
+ })
+ }
+
+ // CapLength
+ for _, n := range []int{0, 1, len(tc.data)} {
+ t.Run(fmt.Sprintf("CapLength%d", n), func(t *testing.T) {
+ pkt := tc.makePkt(t)
+ pkt.Data().CapLength(n)
+
+ want := []byte(tc.data)
+ if n < len(want) {
+ want = want[:n]
+ }
+ checkData(t, pkt, want)
+ })
+ }
+
+ // Views
+ t.Run("Views", func(t *testing.T) {
+ pkt := tc.makePkt(t)
+ checkData(t, pkt, []byte(tc.data))
+ })
+
+ // AppendView
+ t.Run("AppendView", func(t *testing.T) {
+ s := "APPEND"
+
+ pkt := tc.makePkt(t)
+ pkt.Data().AppendView(buffer.View(s))
+
+ checkData(t, pkt, []byte(tc.data+s))
+ })
+
+ // ReadFromData/VV
+ for _, n := range []int{0, 1, 2, 7, 10, 14, 20} {
+ t.Run(fmt.Sprintf("ReadFromData%d", n), func(t *testing.T) {
+ s := "TO READ"
+ otherPkt := NewPacketBuffer(PacketBufferOptions{
+ Data: vv(s, s),
+ })
+ s += s
+
+ pkt := tc.makePkt(t)
+ pkt.Data().ReadFromData(otherPkt.Data(), n)
+
+ if n < len(s) {
+ s = s[:n]
+ }
+ checkData(t, pkt, []byte(tc.data+s))
+ })
+ t.Run(fmt.Sprintf("ReadFromVV%d", n), func(t *testing.T) {
+ s := "TO READ"
+ srcVV := vv(s, s)
+ s += s
+
+ pkt := tc.makePkt(t)
+ pkt.Data().ReadFromVV(&srcVV, n)
+
+ if n < len(s) {
+ s = s[:n]
+ }
+ checkData(t, pkt, []byte(tc.data+s))
+ })
+ }
+
+ // ExtractVV
+ t.Run("ExtractVV", func(t *testing.T) {
+ pkt := tc.makePkt(t)
+ extractedVV := pkt.Data().ExtractVV()
+
+ got := extractedVV.ToOwnedView()
+ want := []byte(tc.data)
+ if !bytes.Equal(got, want) {
+ t.Errorf("pkt.Data().ExtractVV().ToOwnedView() = %q, want %q", got, want)
+ }
+ })
+
+ // Replace
+ t.Run("Replace", func(t *testing.T) {
+ s := "REPLACED"
+
+ pkt := tc.makePkt(t)
+ pkt.Data().Replace(vv(s))
+
+ checkData(t, pkt, []byte(s))
+ })
+ })
+ }
+}
+
func checkInitialPacketBuffer(t *testing.T, pk *PacketBuffer, opts PacketBufferOptions) {
t.Helper()
reserved := opts.ReserveHeaderBytes
@@ -356,7 +510,7 @@ func checkInitialPacketBuffer(t *testing.T, pk *PacketBuffer, opts PacketBufferO
if got, want := pk.Size(), len(data); got != want {
t.Errorf("Initial pk.Size() = %d, want %d", got, want)
}
- checkViewEqual(t, "Initial pk.Data.Views()", concatViews(pk.Data.Views()...), data)
+ checkData(t, pk, data)
checkViewEqual(t, "Initial pk.Views()", concatViews(pk.Views()...), data)
// Check the initial values for each header.
checkPacketHeader(t, "Initial pk.LinkHeader", pk.LinkHeader(), nil)
@@ -383,6 +537,70 @@ func checkViewEqual(t *testing.T, what string, got, want buffer.View) {
}
}
+func checkData(t *testing.T, pkt *PacketBuffer, want []byte) {
+ t.Helper()
+ if got := concatViews(pkt.Data().Views()...); !bytes.Equal(got, want) {
+ t.Errorf("pkt.Data().Views() = %x, want %x", got, want)
+ }
+ if got := pkt.Data().Size(); got != len(want) {
+ t.Errorf("pkt.Data().Size() = %d, want %d", got, len(want))
+ }
+
+ t.Run("AsRange", func(t *testing.T) {
+ // Full range
+ checkRange(t, pkt.Data().AsRange(), want)
+
+ // SubRange
+ for _, off := range []int{0, 1, len(want), len(want) + 1} {
+ t.Run(fmt.Sprintf("SubRange%d", off), func(t *testing.T) {
+ // Empty when off is greater than the size of range.
+ var sub []byte
+ if off < len(want) {
+ sub = want[off:]
+ }
+ checkRange(t, pkt.Data().AsRange().SubRange(off), sub)
+ })
+ }
+
+ // Capped
+ for _, n := range []int{0, 1, len(want), len(want) + 1} {
+ t.Run(fmt.Sprintf("Capped%d", n), func(t *testing.T) {
+ sub := want
+ if n < len(sub) {
+ sub = sub[:n]
+ }
+ checkRange(t, pkt.Data().AsRange().Capped(n), sub)
+ })
+ }
+ })
+}
+
+func checkRange(t *testing.T, r Range, data []byte) {
+ if got, want := r.Size(), len(data); got != want {
+ t.Errorf("r.Size() = %d, want %d", got, want)
+ }
+ if got := r.AsView(); !bytes.Equal(got, data) {
+ t.Errorf("r.AsView() = %x, want %x", got, data)
+ }
+ if got := r.ToOwnedView(); !bytes.Equal(got, data) {
+ t.Errorf("r.ToOwnedView() = %x, want %x", got, data)
+ }
+ if got, want := r.Checksum(), header.Checksum(data, 0 /* initial */); got != want {
+ t.Errorf("r.Checksum() = %x, want %x", got, want)
+ }
+}
+
+func vv(pieces ...string) buffer.VectorisedView {
+ var views []buffer.View
+ var size int
+ for _, p := range pieces {
+ v := buffer.View([]byte(p))
+ size += len(v)
+ views = append(views, v)
+ }
+ return buffer.NewVectorisedView(size, views)
+}
+
func makeView(size int) buffer.View {
b := byte(size)
return bytes.Repeat([]byte{b}, size)
diff --git a/pkg/tcpip/stack/registration.go b/pkg/tcpip/stack/registration.go
index 43e9e4beb..85f0f471a 100644
--- a/pkg/tcpip/stack/registration.go
+++ b/pkg/tcpip/stack/registration.go
@@ -852,18 +852,46 @@ type InjectableLinkEndpoint interface {
InjectOutbound(dest tcpip.Address, packet []byte) tcpip.Error
}
-// DADResult is the result of a duplicate address detection process.
-type DADResult struct {
- // Resolved is true when DAD completed without detecting a duplicate address
- // on the link.
- //
- // Ignored when Err is non-nil.
- Resolved bool
+// DADResult is a marker interface for the result of a duplicate address
+// detection process.
+type DADResult interface {
+ isDADResult()
+}
+
+var _ DADResult = (*DADSucceeded)(nil)
+
+// DADSucceeded indicates DAD completed without finding any duplicate addresses.
+type DADSucceeded struct{}
- // Err is an error encountered while performing DAD.
+func (*DADSucceeded) isDADResult() {}
+
+var _ DADResult = (*DADError)(nil)
+
+// DADError indicates DAD hit an error.
+type DADError struct {
Err tcpip.Error
}
+func (*DADError) isDADResult() {}
+
+var _ DADResult = (*DADAborted)(nil)
+
+// DADAborted indicates DAD was aborted.
+type DADAborted struct{}
+
+func (*DADAborted) isDADResult() {}
+
+var _ DADResult = (*DADDupAddrDetected)(nil)
+
+// DADDupAddrDetected indicates DAD detected a duplicate address.
+type DADDupAddrDetected struct {
+ // HolderLinkAddress is the link address of the node that holds the duplicate
+ // address.
+ HolderLinkAddress tcpip.LinkAddress
+}
+
+func (*DADDupAddrDetected) isDADResult() {}
+
// DADCompletionHandler is a handler for DAD completion.
type DADCompletionHandler func(DADResult)
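DAD completion handlers now receive one of the concrete result types instead of inspecting the old Resolved/Err fields. A hypothetical handler that exercises every variant (logDADResult is not part of the change; it assumes the log, tcpip, and stack packages are imported):

func logDADResult(addr tcpip.Address, res stack.DADResult) {
	switch r := res.(type) {
	case *stack.DADSucceeded:
		log.Printf("DAD for %s succeeded", addr)
	case *stack.DADAborted:
		log.Printf("DAD for %s was aborted", addr)
	case *stack.DADDupAddrDetected:
		log.Printf("DAD for %s found a duplicate held by %s", addr, r.HolderLinkAddress)
	case *stack.DADError:
		log.Printf("DAD for %s failed: %s", addr, r.Err)
	}
}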
diff --git a/pkg/tcpip/stack/stack.go b/pkg/tcpip/stack/stack.go
index de94ddfda..53370c354 100644
--- a/pkg/tcpip/stack/stack.go
+++ b/pkg/tcpip/stack/stack.go
@@ -813,6 +813,18 @@ func (s *Stack) Forwarding(protocolNum tcpip.NetworkProtocolNumber) bool {
return forwardingProtocol.Forwarding()
}
+// PortRange returns the inclusive range of ephemeral ports used by both UDP
+// and TCP over IPv4 and IPv6.
+func (s *Stack) PortRange() (uint16, uint16) {
+ return s.PortManager.PortRange()
+}
+
+// SetPortRange sets the inclusive ephemeral port range used by both UDP and
+// TCP over IPv4 and IPv6.
+func (s *Stack) SetPortRange(start uint16, end uint16) tcpip.Error {
+ return s.PortManager.SetPortRange(start, end)
+}
+
// SetRouteTable assigns the route table to be used by this stack. It
// specifies which NIC to use for given destination address ranges.
//
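The new accessors wrap PortManager so callers no longer reach into it directly. A hypothetical caller; the stack construction and the chosen range are assumptions, and the default range depends on the port manager configuration:

s := stack.New(stack.Options{ /* transport/network protocols elided */ })

first, last := s.PortRange() // current inclusive ephemeral range for UDP and TCP, IPv4 and IPv6
_, _ = first, last

// Narrow the range; an invalid range is rejected with a tcpip.Error.
if err := s.SetPortRange(40000, 50000); err != nil {
	panic(err)
}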
diff --git a/pkg/tcpip/stack/stack_test.go b/pkg/tcpip/stack/stack_test.go
index 8e39e828c..880219007 100644
--- a/pkg/tcpip/stack/stack_test.go
+++ b/pkg/tcpip/stack/stack_test.go
@@ -137,11 +137,11 @@ func (f *fakeNetworkEndpoint) HandlePacket(pkt *stack.PacketBuffer) {
// Handle control packets.
if netHdr[protocolNumberOffset] == uint8(fakeControlProtocol) {
- nb, ok := pkt.Data.PullUp(fakeNetHeaderLen)
+ nb, ok := pkt.Data().PullUp(fakeNetHeaderLen)
if !ok {
return
}
- pkt.Data.TrimFront(fakeNetHeaderLen)
+ pkt.Data().TrimFront(fakeNetHeaderLen)
f.dispatcher.DeliverTransportError(
tcpip.Address(nb[srcAddrOffset:srcAddrOffset+1]),
tcpip.Address(nb[dstAddrOffset:dstAddrOffset+1]),
@@ -2605,7 +2605,7 @@ func TestNICAutoGenAddrDoesDAD(t *testing.T) {
// means something is wrong.
t.Fatal("timed out waiting for DAD resolution")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, linkLocalAddr, true, nil); diff != "" {
+ if diff := checkDADEvent(e, nicID, linkLocalAddr, &stack.DADSucceeded{}); diff != "" {
t.Errorf("dad event mismatch (-want +got):\n%s", diff)
}
}
@@ -3289,7 +3289,7 @@ func TestDoDADWhenNICEnabled(t *testing.T) {
case <-time.After(dadTransmits*retransmitTimer + defaultAsyncPositiveEventTimeout):
t.Fatal("timed out waiting for DAD resolution")
case e := <-ndpDisp.dadC:
- if diff := checkDADEvent(e, nicID, addr.AddressWithPrefix.Address, true, nil); diff != "" {
+ if diff := checkDADEvent(e, nicID, addr.AddressWithPrefix.Address, &stack.DADSucceeded{}); diff != "" {
t.Errorf("dad event mismatch (-want +got):\n%s", diff)
}
}
@@ -4294,7 +4294,7 @@ func TestWritePacketToRemote(t *testing.T) {
if pkt.Route.RemoteLinkAddress != linkAddr2 {
t.Fatalf("pkt.Route.RemoteAddress = %s, want %s", pkt.Route.RemoteLinkAddress, linkAddr2)
}
- if diff := cmp.Diff(pkt.Pkt.Data.ToView(), buffer.View(test.payload)); diff != "" {
+ if diff := cmp.Diff(pkt.Pkt.Data().AsRange().ToOwnedView(), buffer.View(test.payload)); diff != "" {
t.Errorf("pkt.Pkt.Data mismatch (-want +got):\n%s", diff)
}
})
diff --git a/pkg/tcpip/stack/transport_demuxer.go b/pkg/tcpip/stack/transport_demuxer.go
index e799f9290..e188efccb 100644
--- a/pkg/tcpip/stack/transport_demuxer.go
+++ b/pkg/tcpip/stack/transport_demuxer.go
@@ -359,7 +359,7 @@ func selectEndpoint(id TransportEndpointID, mpep *multiPortEndpoint, seed uint32
return mpep.endpoints[0]
}
- if mpep.flags.IntersectionRefs().ToFlags().Effective().MostRecent {
+ if mpep.flags.SharedFlags().ToFlags().Effective().MostRecent {
return mpep.endpoints[len(mpep.endpoints)-1]
}
@@ -410,7 +410,7 @@ func (ep *multiPortEndpoint) singleRegisterEndpoint(t TransportEndpoint, flags p
if len(ep.endpoints) != 0 {
// If it was previously bound, we need to check if we can bind again.
- if ep.flags.TotalRefs() > 0 && bits&ep.flags.IntersectionRefs() == 0 {
+ if ep.flags.TotalRefs() > 0 && bits&ep.flags.SharedFlags() == 0 {
return &tcpip.ErrPortInUse{}
}
}
@@ -429,7 +429,7 @@ func (ep *multiPortEndpoint) singleCheckEndpoint(flags ports.Flags) tcpip.Error
if len(ep.endpoints) != 0 {
// If it was previously bound, we need to check if we can bind again.
- if ep.flags.TotalRefs() > 0 && bits&ep.flags.IntersectionRefs() == 0 {
+ if ep.flags.TotalRefs() > 0 && bits&ep.flags.SharedFlags() == 0 {
return &tcpip.ErrPortInUse{}
}
}
diff --git a/pkg/tcpip/tcpip.go b/pkg/tcpip/tcpip.go
index 01a4389e3..87ea09a5e 100644
--- a/pkg/tcpip/tcpip.go
+++ b/pkg/tcpip/tcpip.go
@@ -1258,44 +1258,38 @@ func (m *MultiCounterStat) IncrementBy(v uint64) {
type ICMPv4PacketStats struct {
// LINT.IfChange(ICMPv4PacketStats)
- // Echo is the total number of ICMPv4 echo packets counted.
- Echo *StatCounter
+ // EchoRequest is the number of ICMPv4 echo packets counted.
+ EchoRequest *StatCounter
- // EchoReply is the total number of ICMPv4 echo reply packets counted.
+ // EchoReply is the number of ICMPv4 echo reply packets counted.
EchoReply *StatCounter
- // DstUnreachable is the total number of ICMPv4 destination unreachable
- // packets counted.
+ // DstUnreachable is the number of ICMPv4 destination unreachable packets
+ // counted.
DstUnreachable *StatCounter
- // SrcQuench is the total number of ICMPv4 source quench packets
- // counted.
+ // SrcQuench is the number of ICMPv4 source quench packets counted.
SrcQuench *StatCounter
- // Redirect is the total number of ICMPv4 redirect packets counted.
+ // Redirect is the number of ICMPv4 redirect packets counted.
Redirect *StatCounter
- // TimeExceeded is the total number of ICMPv4 time exceeded packets
- // counted.
+ // TimeExceeded is the number of ICMPv4 time exceeded packets counted.
TimeExceeded *StatCounter
- // ParamProblem is the total number of ICMPv4 parameter problem packets
- // counted.
+ // ParamProblem is the number of ICMPv4 parameter problem packets counted.
ParamProblem *StatCounter
- // Timestamp is the total number of ICMPv4 timestamp packets counted.
+ // Timestamp is the number of ICMPv4 timestamp packets counted.
Timestamp *StatCounter
- // TimestampReply is the total number of ICMPv4 timestamp reply packets
- // counted.
+ // TimestampReply is the number of ICMPv4 timestamp reply packets counted.
TimestampReply *StatCounter
- // InfoRequest is the total number of ICMPv4 information request
- // packets counted.
+ // InfoRequest is the number of ICMPv4 information request packets counted.
InfoRequest *StatCounter
- // InfoReply is the total number of ICMPv4 information reply packets
- // counted.
+ // InfoReply is the number of ICMPv4 information reply packets counted.
InfoReply *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterICMPv4PacketStats)
@@ -1307,12 +1301,11 @@ type ICMPv4SentPacketStats struct {
ICMPv4PacketStats
- // Dropped is the total number of ICMPv4 packets dropped due to link
- // layer errors.
+ // Dropped is the number of ICMPv4 packets dropped due to link layer errors.
Dropped *StatCounter
- // RateLimited is the total number of ICMPv4 packets dropped due to
- // rate limit being exceeded.
+ // RateLimited is the number of ICMPv4 packets dropped due to rate limit being
+ // exceeded.
RateLimited *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterICMPv4SentPacketStats)
@@ -1324,7 +1317,7 @@ type ICMPv4ReceivedPacketStats struct {
ICMPv4PacketStats
- // Invalid is the total number of invalid ICMPv4 packets received.
+ // Invalid is the number of invalid ICMPv4 packets received.
Invalid *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterICMPv4ReceivedPacketStats)
@@ -1347,59 +1340,50 @@ type ICMPv4Stats struct {
type ICMPv6PacketStats struct {
// LINT.IfChange(ICMPv6PacketStats)
- // EchoRequest is the total number of ICMPv6 echo request packets
- // counted.
+ // EchoRequest is the number of ICMPv6 echo request packets counted.
EchoRequest *StatCounter
- // EchoReply is the total number of ICMPv6 echo reply packets counted.
+ // EchoReply is the number of ICMPv6 echo reply packets counted.
EchoReply *StatCounter
- // DstUnreachable is the total number of ICMPv6 destination unreachable
- // packets counted.
+ // DstUnreachable is the number of ICMPv6 destination unreachable packets
+ // counted.
DstUnreachable *StatCounter
- // PacketTooBig is the total number of ICMPv6 packet too big packets
- // counted.
+ // PacketTooBig is the number of ICMPv6 packet too big packets counted.
PacketTooBig *StatCounter
- // TimeExceeded is the total number of ICMPv6 time exceeded packets
- // counted.
+ // TimeExceeded is the number of ICMPv6 time exceeded packets counted.
TimeExceeded *StatCounter
- // ParamProblem is the total number of ICMPv6 parameter problem packets
- // counted.
+ // ParamProblem is the number of ICMPv6 parameter problem packets counted.
ParamProblem *StatCounter
- // RouterSolicit is the total number of ICMPv6 router solicit packets
- // counted.
+ // RouterSolicit is the number of ICMPv6 router solicit packets counted.
RouterSolicit *StatCounter
- // RouterAdvert is the total number of ICMPv6 router advert packets
- // counted.
+ // RouterAdvert is the number of ICMPv6 router advert packets counted.
RouterAdvert *StatCounter
- // NeighborSolicit is the total number of ICMPv6 neighbor solicit
- // packets counted.
+ // NeighborSolicit is the number of ICMPv6 neighbor solicit packets counted.
NeighborSolicit *StatCounter
- // NeighborAdvert is the total number of ICMPv6 neighbor advert packets
- // counted.
+ // NeighborAdvert is the number of ICMPv6 neighbor advert packets counted.
NeighborAdvert *StatCounter
- // RedirectMsg is the total number of ICMPv6 redirect message packets
- // counted.
+ // RedirectMsg is the number of ICMPv6 redirect message packets counted.
RedirectMsg *StatCounter
- // MulticastListenerQuery is the total number of Multicast Listener Query
- // messages counted.
+ // MulticastListenerQuery is the number of Multicast Listener Query messages
+ // counted.
MulticastListenerQuery *StatCounter
- // MulticastListenerReport is the total number of Multicast Listener Report
- // messages counted.
+ // MulticastListenerReport is the number of Multicast Listener Report messages
+ // counted.
MulticastListenerReport *StatCounter
- // MulticastListenerDone is the total number of Multicast Listener Done
- // messages counted.
+ // MulticastListenerDone is the number of Multicast Listener Done messages
+ // counted.
MulticastListenerDone *StatCounter
// LINT.ThenChange(network/ipv6/stats.go:multiCounterICMPv6PacketStats)
@@ -1411,12 +1395,11 @@ type ICMPv6SentPacketStats struct {
ICMPv6PacketStats
- // Dropped is the total number of ICMPv6 packets dropped due to link
- // layer errors.
+ // Dropped is the number of ICMPv6 packets dropped due to link layer errors.
Dropped *StatCounter
- // RateLimited is the total number of ICMPv6 packets dropped due to
- // rate limit being exceeded.
+ // RateLimited is the number of ICMPv6 packets dropped due to rate limit being
+ // exceeded.
RateLimited *StatCounter
// LINT.ThenChange(network/ipv6/stats.go:multiCounterICMPv6SentPacketStats)
@@ -1428,15 +1411,15 @@ type ICMPv6ReceivedPacketStats struct {
ICMPv6PacketStats
- // Unrecognized is the total number of ICMPv6 packets received that the
- // transport layer does not know how to parse.
+ // Unrecognized is the number of ICMPv6 packets received that the transport
+ // layer does not know how to parse.
Unrecognized *StatCounter
- // Invalid is the total number of invalid ICMPv6 packets received.
+ // Invalid is the number of invalid ICMPv6 packets received.
Invalid *StatCounter
- // RouterOnlyPacketsDroppedByHost is the total number of ICMPv6 packets
- // dropped due to being router-specific packets.
+ // RouterOnlyPacketsDroppedByHost is the number of ICMPv6 packets dropped due
+ // to being router-specific packets.
RouterOnlyPacketsDroppedByHost *StatCounter
// LINT.ThenChange(network/ipv6/stats.go:multiCounterICMPv6ReceivedPacketStats)
@@ -1468,18 +1451,18 @@ type ICMPStats struct {
type IGMPPacketStats struct {
// LINT.IfChange(IGMPPacketStats)
- // MembershipQuery is the total number of Membership Query messages counted.
+ // MembershipQuery is the number of Membership Query messages counted.
MembershipQuery *StatCounter
- // V1MembershipReport is the total number of Version 1 Membership Report
- // messages counted.
+ // V1MembershipReport is the number of Version 1 Membership Report messages
+ // counted.
V1MembershipReport *StatCounter
- // V2MembershipReport is the total number of Version 2 Membership Report
- // messages counted.
+ // V2MembershipReport is the number of Version 2 Membership Report messages
+ // counted.
V2MembershipReport *StatCounter
- // LeaveGroup is the total number of Leave Group messages counted.
+ // LeaveGroup is the number of Leave Group messages counted.
LeaveGroup *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterIGMPPacketStats)
@@ -1491,7 +1474,7 @@ type IGMPSentPacketStats struct {
IGMPPacketStats
- // Dropped is the total number of IGMP packets dropped.
+ // Dropped is the number of IGMP packets dropped.
Dropped *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterIGMPSentPacketStats)
@@ -1503,15 +1486,14 @@ type IGMPReceivedPacketStats struct {
IGMPPacketStats
- // Invalid is the total number of invalid IGMP packets received.
+ // Invalid is the number of invalid IGMP packets received.
Invalid *StatCounter
- // ChecksumErrors is the total number of IGMP packets dropped due to bad
- // checksums.
+ // ChecksumErrors is the number of IGMP packets dropped due to bad checksums.
ChecksumErrors *StatCounter
- // Unrecognized is the total number of unrecognized messages counted, these
- // are silently ignored for forward-compatibilty.
+ // Unrecognized is the number of unrecognized messages counted; these are
+ // silently ignored for forward-compatibility.
Unrecognized *StatCounter
// LINT.ThenChange(network/ipv4/stats.go:multiCounterIGMPReceivedPacketStats)
@@ -1534,51 +1516,50 @@ type IGMPStats struct {
type IPStats struct {
// LINT.IfChange(IPStats)
- // PacketsReceived is the total number of IP packets received from the
- // link layer.
+ // PacketsReceived is the number of IP packets received from the link layer.
PacketsReceived *StatCounter
- // DisabledPacketsReceived is the total number of IP packets received from the
- // link layer when the IP layer is disabled.
+ // DisabledPacketsReceived is the number of IP packets received from the link
+ // layer when the IP layer is disabled.
DisabledPacketsReceived *StatCounter
- // InvalidDestinationAddressesReceived is the total number of IP packets
- // received with an unknown or invalid destination address.
+ // InvalidDestinationAddressesReceived is the number of IP packets received
+ // with an unknown or invalid destination address.
InvalidDestinationAddressesReceived *StatCounter
- // InvalidSourceAddressesReceived is the total number of IP packets received
- // with a source address that should never have been received on the wire.
+ // InvalidSourceAddressesReceived is the number of IP packets received with a
+ // source address that should never have been received on the wire.
InvalidSourceAddressesReceived *StatCounter
- // PacketsDelivered is the total number of incoming IP packets that
- // are successfully delivered to the transport layer.
+ // PacketsDelivered is the number of incoming IP packets that are successfully
+ // delivered to the transport layer.
PacketsDelivered *StatCounter
- // PacketsSent is the total number of IP packets sent via WritePacket.
+ // PacketsSent is the number of IP packets sent via WritePacket.
PacketsSent *StatCounter
- // OutgoingPacketErrors is the total number of IP packets which failed
- // to write to a link-layer endpoint.
+ // OutgoingPacketErrors is the number of IP packets which failed to write to a
+ // link-layer endpoint.
OutgoingPacketErrors *StatCounter
- // MalformedPacketsReceived is the total number of IP Packets that were
- // dropped due to the IP packet header failing validation checks.
+ // MalformedPacketsReceived is the number of IP packets that were dropped due
+ // to the IP packet header failing validation checks.
MalformedPacketsReceived *StatCounter
- // MalformedFragmentsReceived is the total number of IP Fragments that were
- // dropped due to the fragment failing validation checks.
+ // MalformedFragmentsReceived is the number of IP fragments that were dropped
+ // due to the fragment failing validation checks.
MalformedFragmentsReceived *StatCounter
- // IPTablesPreroutingDropped is the total number of IP packets dropped
- // in the Prerouting chain.
+ // IPTablesPreroutingDropped is the number of IP packets dropped in the
+ // Prerouting chain.
IPTablesPreroutingDropped *StatCounter
- // IPTablesInputDropped is the total number of IP packets dropped in
- // the Input chain.
+ // IPTablesInputDropped is the number of IP packets dropped in the Input
+ // chain.
IPTablesInputDropped *StatCounter
- // IPTablesOutputDropped is the total number of IP packets dropped in
- // the Output chain.
+ // IPTablesOutputDropped is the number of IP packets dropped in the Output
+ // chain.
IPTablesOutputDropped *StatCounter
// TODO(https://gvisor.dev/issues/5529): Move the IPv4-only option stats out
diff --git a/pkg/tcpip/tests/integration/forward_test.go b/pkg/tcpip/tests/integration/forward_test.go
index 0cb9d034e..38c2f321b 100644
--- a/pkg/tcpip/tests/integration/forward_test.go
+++ b/pkg/tcpip/tests/integration/forward_test.go
@@ -135,14 +135,15 @@ func TestForwarding(t *testing.T) {
name string
proto tcpip.TransportProtocolNumber
expectedConnectErr tcpip.Error
- setupServerSide func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{})
+ setupServer func(t *testing.T, ep tcpip.Endpoint)
+ setupServerConn func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{})
needRemoteAddr bool
}{
{
name: "UDP",
proto: udp.ProtocolNumber,
expectedConnectErr: nil,
- setupServerSide: func(t *testing.T, ep tcpip.Endpoint, _ <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
+ setupServerConn: func(t *testing.T, ep tcpip.Endpoint, _ <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
t.Helper()
if err := ep.Connect(clientAddr); err != nil {
@@ -156,12 +157,16 @@ func TestForwarding(t *testing.T) {
name: "TCP",
proto: tcp.ProtocolNumber,
expectedConnectErr: &tcpip.ErrConnectStarted{},
- setupServerSide: func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
+ setupServer: func(t *testing.T, ep tcpip.Endpoint) {
t.Helper()
if err := ep.Listen(1); err != nil {
t.Fatalf("ep.Listen(1): %s", err)
}
+ },
+ setupServerConn: func(t *testing.T, ep tcpip.Endpoint, ch <-chan struct{}, clientAddr tcpip.FullAddress) (tcpip.Endpoint, chan struct{}) {
+ t.Helper()
+
var addr tcpip.FullAddress
for {
newEP, wq, err := ep.Accept(&addr)
@@ -214,6 +219,9 @@ func TestForwarding(t *testing.T) {
t.Fatalf("epsAndAddrs.clientEP.Bind(%#v): %s", clientAddr, err)
}
+ if subTest.setupServer != nil {
+ subTest.setupServer(t, epsAndAddrs.serverEP)
+ }
{
err := epsAndAddrs.clientEP.Connect(serverAddr)
if diff := cmp.Diff(subTest.expectedConnectErr, err); diff != "" {
@@ -229,7 +237,7 @@ func TestForwarding(t *testing.T) {
serverEP := epsAndAddrs.serverEP
serverCH := epsAndAddrs.serverReadableCH
- if ep, ch := subTest.setupServerSide(t, serverEP, serverCH, clientAddr); ep != nil {
+ if ep, ch := subTest.setupServerConn(t, serverEP, serverCH, clientAddr); ep != nil {
defer ep.Close()
serverEP = ep
serverCH = ch
@@ -256,13 +264,20 @@ func TestForwarding(t *testing.T) {
read := func(ch chan struct{}, ep tcpip.Endpoint, data []byte, expectedFrom tcpip.FullAddress) {
t.Helper()
- // Wait for the endpoint to be readable.
- <-ch
var buf bytes.Buffer
- opts := tcpip.ReadOptions{NeedRemoteAddr: subTest.needRemoteAddr}
- res, err := ep.Read(&buf, opts)
- if err != nil {
- t.Fatalf("ep.Read(_, %d, %#v): %s", len(data), opts, err)
+ var res tcpip.ReadResult
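+ // Retry the read until data arrives, waiting on the readable channel
+ // whenever the endpoint returns ErrWouldBlock.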
+ for {
+ var err tcpip.Error
+ opts := tcpip.ReadOptions{NeedRemoteAddr: subTest.needRemoteAddr}
+ res, err = ep.Read(&buf, opts)
+ if _, ok := err.(*tcpip.ErrWouldBlock); ok {
+ <-ch
+ continue
+ }
+ if err != nil {
+ t.Fatalf("ep.Read(_, %d, %#v): %s", len(data), opts, err)
+ }
+ break
}
readResult := tcpip.ReadResult{
diff --git a/pkg/tcpip/tests/integration/link_resolution_test.go b/pkg/tcpip/tests/integration/link_resolution_test.go
index 165f73f21..095623789 100644
--- a/pkg/tcpip/tests/integration/link_resolution_test.go
+++ b/pkg/tcpip/tests/integration/link_resolution_test.go
@@ -675,9 +675,7 @@ func TestWritePacketsLinkResolution(t *testing.T) {
Length: length,
})
xsum := r.PseudoHeaderChecksum(udp.ProtocolNumber, length)
- for _, v := range pkt.Data.Views() {
- xsum = header.Checksum(v, xsum)
- }
+ xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())
udpHdr.SetChecksum(^udpHdr.CalculateChecksum(xsum))
pkts.PushBack(pkt)
@@ -1169,53 +1167,53 @@ func TestDAD(t *testing.T) {
}
tests := []struct {
- name string
- netProto tcpip.NetworkProtocolNumber
- dadNetProto tcpip.NetworkProtocolNumber
- remoteAddr tcpip.Address
- expectedResolved bool
+ name string
+ netProto tcpip.NetworkProtocolNumber
+ dadNetProto tcpip.NetworkProtocolNumber
+ remoteAddr tcpip.Address
+ expectedResult stack.DADResult
}{
{
- name: "IPv4 own address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
- expectedResolved: true,
+ name: "IPv4 own address",
+ netProto: ipv4.ProtocolNumber,
+ dadNetProto: arp.ProtocolNumber,
+ remoteAddr: utils.Ipv4Addr1.AddressWithPrefix.Address,
+ expectedResult: &stack.DADSucceeded{},
},
{
- name: "IPv6 own address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
- expectedResolved: true,
+ name: "IPv6 own address",
+ netProto: ipv6.ProtocolNumber,
+ dadNetProto: ipv6.ProtocolNumber,
+ remoteAddr: utils.Ipv6Addr1.AddressWithPrefix.Address,
+ expectedResult: &stack.DADSucceeded{},
},
{
- name: "IPv4 duplicate address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
- expectedResolved: false,
+ name: "IPv4 duplicate address",
+ netProto: ipv4.ProtocolNumber,
+ dadNetProto: arp.ProtocolNumber,
+ remoteAddr: utils.Ipv4Addr2.AddressWithPrefix.Address,
+ expectedResult: &stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},
},
{
- name: "IPv6 duplicate address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
- expectedResolved: false,
+ name: "IPv6 duplicate address",
+ netProto: ipv6.ProtocolNumber,
+ dadNetProto: ipv6.ProtocolNumber,
+ remoteAddr: utils.Ipv6Addr2.AddressWithPrefix.Address,
+ expectedResult: &stack.DADDupAddrDetected{HolderLinkAddress: utils.LinkAddr2},
},
{
- name: "IPv4 no duplicate address",
- netProto: ipv4.ProtocolNumber,
- dadNetProto: arp.ProtocolNumber,
- remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
- expectedResolved: true,
+ name: "IPv4 no duplicate address",
+ netProto: ipv4.ProtocolNumber,
+ dadNetProto: arp.ProtocolNumber,
+ remoteAddr: utils.Ipv4Addr3.AddressWithPrefix.Address,
+ expectedResult: &stack.DADSucceeded{},
},
{
- name: "IPv6 no duplicate address",
- netProto: ipv6.ProtocolNumber,
- dadNetProto: ipv6.ProtocolNumber,
- remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
- expectedResolved: true,
+ name: "IPv6 no duplicate address",
+ netProto: ipv6.ProtocolNumber,
+ dadNetProto: ipv6.ProtocolNumber,
+ remoteAddr: utils.Ipv6Addr3.AddressWithPrefix.Address,
+ expectedResult: &stack.DADSucceeded{},
},
}
@@ -1262,7 +1260,7 @@ func TestDAD(t *testing.T) {
}
expectResults := 1
- if test.expectedResolved {
+ if _, ok := test.expectedResult.(*stack.DADSucceeded); ok {
const delta = time.Nanosecond
clock.Advance(time.Duration(dadConfigs.DupAddrDetectTransmits)*dadConfigs.RetransmitTimer - delta)
select {
@@ -1287,7 +1285,7 @@ func TestDAD(t *testing.T) {
}
for i := 0; i < expectResults; i++ {
- if diff := cmp.Diff(stack.DADResult{Resolved: test.expectedResolved}, <-ch); diff != "" {
+ if diff := cmp.Diff(test.expectedResult, <-ch); diff != "" {
t.Errorf("(i=%d) DAD result mismatch (-want +got):\n%s", i, diff)
}
}
diff --git a/pkg/tcpip/tests/integration/loopback_test.go b/pkg/tcpip/tests/integration/loopback_test.go
index c56155ea2..80afc2825 100644
--- a/pkg/tcpip/tests/integration/loopback_test.go
+++ b/pkg/tcpip/tests/integration/loopback_test.go
@@ -38,7 +38,7 @@ var _ ipv6.NDPDispatcher = (*ndpDispatcher)(nil)
type ndpDispatcher struct{}
-func (*ndpDispatcher) OnDuplicateAddressDetectionStatus(tcpip.NICID, tcpip.Address, bool, tcpip.Error) {
+func (*ndpDispatcher) OnDuplicateAddressDetectionResult(tcpip.NICID, tcpip.Address, stack.DADResult) {
}
func (*ndpDispatcher) OnDefaultRouterDiscovered(tcpip.NICID, tcpip.Address) bool {
diff --git a/pkg/tcpip/tests/integration/multicast_broadcast_test.go b/pkg/tcpip/tests/integration/multicast_broadcast_test.go
index e4439ba79..29266a4fc 100644
--- a/pkg/tcpip/tests/integration/multicast_broadcast_test.go
+++ b/pkg/tcpip/tests/integration/multicast_broadcast_test.go
@@ -75,7 +75,11 @@ func TestPingMulticastBroadcast(t *testing.T) {
pkt.SetType(header.ICMPv6EchoRequest)
pkt.SetCode(0)
pkt.SetChecksum(0)
- pkt.SetChecksum(header.ICMPv6Checksum(pkt, utils.RemoteIPv6Addr, dst, buffer.VectorisedView{}))
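+ // The echo request carries no payload, so only the ICMPv6 header and the
+ // pseudo-header feed the checksum.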
+ pkt.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: pkt,
+ Src: utils.RemoteIPv6Addr,
+ Dst: dst,
+ }))
ip := header.IPv6(hdr.Prepend(header.IPv6MinimumSize))
ip.Encode(&header.IPv6Fields{
PayloadLength: header.ICMPv6MinimumSize,
diff --git a/pkg/tcpip/transport/icmp/endpoint.go b/pkg/tcpip/transport/icmp/endpoint.go
index f5e1a6e45..06c63e74a 100644
--- a/pkg/tcpip/transport/icmp/endpoint.go
+++ b/pkg/tcpip/transport/icmp/endpoint.go
@@ -26,6 +26,8 @@ import (
"gvisor.dev/gvisor/pkg/waiter"
)
+// TODO(https://gvisor.dev/issues/5623): Unit test this package.
+
// +stateify savable
type icmpPacket struct {
icmpPacketEntry
@@ -414,15 +416,27 @@ func send4(r *stack.Route, ident uint16, data buffer.View, ttl uint8, owner tcpi
return &tcpip.ErrInvalidEndpointState{}
}
+ // Because this ICMP endpoint is implemented in the transport layer, we can
+ // only increment the 'stack-wide' stats and not the 'per-NetworkEndpoint'
+ // stats.
+ sentStat := r.Stats().ICMP.V4.PacketsSent.EchoRequest
+
icmpv4.SetChecksum(0)
icmpv4.SetChecksum(^header.Checksum(icmpv4, header.Checksum(data, 0)))
- pkt.Data = data.ToVectorisedView()
+ pkt.Data().AppendView(data)
if ttl == 0 {
ttl = r.DefaultTTL()
}
- return r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv4ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt)
+
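+ // Count the packet as sent only if the write succeeds; otherwise count it
+ // as dropped.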
+ if err := r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv4ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt); err != nil {
+ r.Stats().ICMP.V4.PacketsSent.Dropped.Increment()
+ return err
+ }
+
+ sentStat.Increment()
+ return nil
}
func send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) tcpip.Error {
@@ -444,15 +458,31 @@ func send6(r *stack.Route, ident uint16, data buffer.View, ttl uint8) tcpip.Erro
if icmpv6.Type() != header.ICMPv6EchoRequest || icmpv6.Code() != 0 {
return &tcpip.ErrInvalidEndpointState{}
}
-
- dataVV := data.ToVectorisedView()
- icmpv6.SetChecksum(header.ICMPv6Checksum(icmpv6, r.LocalAddress, r.RemoteAddress, dataVV))
- pkt.Data = dataVV
+ // Because this ICMP endpoint is implemented in the transport layer, we can
+ // only increment the 'stack-wide' stats and not the 'per-NetworkEndpoint'
+ // stats.
+ sentStat := r.Stats().ICMP.V6.PacketsSent.EchoRequest
+
+ pkt.Data().AppendView(data)
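+ // The ICMPv6 checksum covers the pseudo-header, the ICMPv6 header and the
+ // payload; pass the payload's checksum and length so they can be folded in.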
+ dataRange := pkt.Data().AsRange()
+ icmpv6.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpv6,
+ Src: r.LocalAddress,
+ Dst: r.RemoteAddress,
+ PayloadCsum: dataRange.Checksum(),
+ PayloadLen: dataRange.Size(),
+ }))
if ttl == 0 {
ttl = r.DefaultTTL()
}
- return r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt)
+
+ if err := r.WritePacket(nil /* gso */, stack.NetworkHeaderParams{Protocol: header.ICMPv6ProtocolNumber, TTL: ttl, TOS: stack.DefaultTOS}, pkt); err != nil {
+ r.Stats().ICMP.V6.PacketsSent.Dropped.Increment()
+ return err
+ }
+
+ sentStat.Increment()
+ return nil
}
// checkV4MappedLocked determines the effective network protocol and converts
@@ -763,7 +793,7 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB
// ICMP socket's data includes ICMP header.
packet.data = pkt.TransportHeader().View().ToVectorisedView()
- packet.data.Append(pkt.Data)
+ packet.data.Append(pkt.Data().ExtractVV())
e.rcvList.PushBack(packet)
e.rcvBufSize += packet.data.Size()
diff --git a/pkg/tcpip/transport/packet/endpoint.go b/pkg/tcpip/transport/packet/endpoint.go
index 73bb66830..367757d3b 100644
--- a/pkg/tcpip/transport/packet/endpoint.go
+++ b/pkg/tcpip/transport/packet/endpoint.go
@@ -432,7 +432,7 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,
// Cooked packets can simply be queued.
switch pkt.PktType {
case tcpip.PacketHost:
- packet.data = pkt.Data
+ packet.data = pkt.Data().ExtractVV()
case tcpip.PacketOutgoing:
// Strip Link Header.
var combinedVV buffer.VectorisedView
@@ -442,7 +442,7 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,
if v := pkt.TransportHeader().View(); !v.IsEmpty() {
combinedVV.AppendView(v)
}
- combinedVV.Append(pkt.Data)
+ combinedVV.Append(pkt.Data().ExtractVV())
packet.data = combinedVV
default:
panic(fmt.Sprintf("unexpected PktType in pkt: %+v", pkt))
@@ -468,7 +468,7 @@ func (ep *endpoint) HandlePacket(nicID tcpip.NICID, localAddr tcpip.LinkAddress,
linkHeader = append(buffer.View(nil), pkt.LinkHeader().View()...)
}
combinedVV := linkHeader.ToVectorisedView()
- combinedVV.Append(pkt.Data)
+ combinedVV.Append(pkt.Data().ExtractVV())
packet.data = combinedVV
} else {
packet.data = buffer.NewVectorisedView(pkt.Size(), pkt.Views())
diff --git a/pkg/tcpip/transport/raw/endpoint.go b/pkg/tcpip/transport/raw/endpoint.go
index fe8e9c751..2709be90c 100644
--- a/pkg/tcpip/transport/raw/endpoint.go
+++ b/pkg/tcpip/transport/raw/endpoint.go
@@ -644,7 +644,7 @@ func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
} else {
combinedVV = append(buffer.View(nil), pkt.TransportHeader().View()...).ToVectorisedView()
}
- combinedVV.Append(pkt.Data)
+ combinedVV.Append(pkt.Data().ExtractVV())
packet.data = combinedVV
packet.timestampNS = e.stack.Clock().NowNanoseconds()
diff --git a/pkg/tcpip/transport/tcp/BUILD b/pkg/tcpip/transport/tcp/BUILD
index fcdd032c5..a69d6624d 100644
--- a/pkg/tcpip/transport/tcp/BUILD
+++ b/pkg/tcpip/transport/tcp/BUILD
@@ -105,7 +105,6 @@ go_test(
"//pkg/tcpip/link/sniffer",
"//pkg/tcpip/network/ipv4",
"//pkg/tcpip/network/ipv6",
- "//pkg/tcpip/ports",
"//pkg/tcpip/seqnum",
"//pkg/tcpip/stack",
"//pkg/tcpip/transport/tcp/testing/context",
diff --git a/pkg/tcpip/transport/tcp/accept.go b/pkg/tcpip/transport/tcp/accept.go
index 842c1622b..3b574837c 100644
--- a/pkg/tcpip/transport/tcp/accept.go
+++ b/pkg/tcpip/transport/tcp/accept.go
@@ -27,6 +27,7 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
+ "gvisor.dev/gvisor/pkg/tcpip/ports"
"gvisor.dev/gvisor/pkg/tcpip/seqnum"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/waiter"
@@ -432,15 +433,16 @@ func (e *endpoint) propagateInheritableOptionsLocked(n *endpoint) {
// * e.mu is held.
func (e *endpoint) reserveTupleLocked() bool {
dest := tcpip.FullAddress{Addr: e.ID.RemoteAddress, Port: e.ID.RemotePort}
- if !e.stack.ReserveTuple(
- e.effectiveNetProtos,
- ProtocolNumber,
- e.ID.LocalAddress,
- e.ID.LocalPort,
- e.boundPortFlags,
- e.boundBindToDevice,
- dest,
- ) {
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: e.ID.LocalPort,
+ Flags: e.boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: dest,
+ }
+ if !e.stack.ReserveTuple(portRes) {
return false
}
diff --git a/pkg/tcpip/transport/tcp/connect.go b/pkg/tcpip/transport/tcp/connect.go
index 461b1a9d7..3404af6bb 100644
--- a/pkg/tcpip/transport/tcp/connect.go
+++ b/pkg/tcpip/transport/tcp/connect.go
@@ -68,7 +68,7 @@ type handshake struct {
ep *endpoint
state handshakeState
active bool
- flags uint8
+ flags header.TCPFlags
ackNum seqnum.Value
// iss is the initial send sequence number, as defined in RFC 793.
@@ -606,7 +606,7 @@ func newBackoffTimer(timeout, maxTimeout time.Duration, f func()) (*backoffTimer
func (bt *backoffTimer) reset() tcpip.Error {
bt.timeout *= 2
- if bt.timeout > MaxRTO {
+ if bt.timeout > bt.maxTimeout {
return &tcpip.ErrTimeout{}
}
bt.t.Reset(bt.timeout)
@@ -700,7 +700,7 @@ type tcpFields struct {
id stack.TransportEndpointID
ttl uint8
tos uint8
- flags byte
+ flags header.TCPFlags
seq seqnum.Value
ack seqnum.Value
rcvWnd seqnum.Size
@@ -752,7 +752,7 @@ func buildTCPHdr(r *stack.Route, tf tcpFields, pkt *stack.PacketBuffer, gso *sta
// header and data and get the right sum of the TCP packet.
tcp.SetChecksum(xsum)
} else if r.RequiresTXTransportChecksum() {
- xsum = header.ChecksumVV(pkt.Data, xsum)
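+ // Fold the payload's checksum into the running checksum before computing the
+ // final TCP checksum.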
+ xsum = header.ChecksumCombine(xsum, pkt.Data().AsRange().Checksum())
tcp.SetChecksum(^tcp.CalculateChecksum(xsum))
}
}
@@ -786,7 +786,7 @@ func sendTCPBatch(r *stack.Route, tf tcpFields, data buffer.VectorisedView, gso
})
pkt.Hash = tf.txHash
pkt.Owner = owner
- data.ReadToVV(&pkt.Data, packetSize)
+ pkt.Data().ReadFromVV(&data, packetSize)
buildTCPHdr(r, tf, pkt, gso)
tf.seq = tf.seq.Add(seqnum.Size(packetSize))
pkts.PushBack(pkt)
@@ -877,7 +877,7 @@ func (e *endpoint) makeOptions(sackBlocks []header.SACKBlock) []byte {
}
// sendRaw sends a TCP segment to the endpoint's peer.
-func (e *endpoint) sendRaw(data buffer.VectorisedView, flags byte, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
+func (e *endpoint) sendRaw(data buffer.VectorisedView, flags header.TCPFlags, seq, ack seqnum.Value, rcvWnd seqnum.Size) tcpip.Error {
var sackBlocks []header.SACKBlock
if e.EndpointState() == StateEstablished && e.rcv.pendingRcvdSegments.Len() > 0 && (flags&header.TCPFlagAck != 0) {
sackBlocks = e.sack.Blocks[:e.sack.NumBlocks]
diff --git a/pkg/tcpip/transport/tcp/endpoint.go b/pkg/tcpip/transport/tcp/endpoint.go
index f47b39ccc..129f36d11 100644
--- a/pkg/tcpip/transport/tcp/endpoint.go
+++ b/pkg/tcpip/transport/tcp/endpoint.go
@@ -760,6 +760,7 @@ func (e *endpoint) LockUser() {
// protocol goroutine altogether.
//
// Precondition: e.LockUser() must have been called before calling e.UnlockUser()
+// +checklocks:e.mu
func (e *endpoint) UnlockUser() {
// Lock segment queue before checking so that we avoid a race where
// segments can be queued between the time we check if queue is empty
@@ -800,6 +801,7 @@ func (e *endpoint) StopWork() {
}
// ResumeWork resumes packet processing. Only to be used in tests.
+// +checklocks:e.mu
func (e *endpoint) ResumeWork() {
e.mu.Unlock()
}
@@ -1095,7 +1097,16 @@ func (e *endpoint) closeNoShutdownLocked() {
e.isRegistered = false
}
- e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, e.boundDest)
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: e.ID.LocalPort,
+ Flags: e.boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: e.boundDest,
+ }
+ e.stack.ReleasePort(portRes)
e.isPortReserved = false
e.boundBindToDevice = 0
e.boundPortFlags = ports.Flags{}
@@ -1170,7 +1181,16 @@ func (e *endpoint) cleanupLocked() {
}
if e.isPortReserved {
- e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, e.boundDest)
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: e.ID.LocalPort,
+ Flags: e.boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: e.boundDest,
+ }
+ e.stack.ReleasePort(portRes)
e.isPortReserved = false
}
e.boundBindToDevice = 0
@@ -2218,7 +2238,7 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp
portBuf := make([]byte, 2)
binary.LittleEndian.PutUint16(portBuf, e.ID.RemotePort)
h.Write(portBuf)
- portOffset := h.Sum32()
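+ // Ports are 16-bit values, so only the low 16 bits of the hash are used as
+ // the port offset.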
+ portOffset := uint16(h.Sum32())
var twReuse tcpip.TCPTimeWaitReuseOption
if err := e.stack.TransportProtocolOption(ProtocolNumber, &twReuse); err != nil {
@@ -2240,7 +2260,16 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp
if sameAddr && p == e.ID.RemotePort {
return false, nil
}
- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr, nil /* testPort */); err != nil {
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: p,
+ Flags: e.portFlags,
+ BindToDevice: bindToDevice,
+ Dest: addr,
+ }
+ if _, err := e.stack.ReservePort(portRes, nil /* testPort */); err != nil {
if _, ok := err.(*tcpip.ErrPortInUse); !ok || !reuse {
return false, nil
}
@@ -2278,7 +2307,16 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp
tcpEP.notifyProtocolGoroutine(notifyAbort)
tcpEP.UnlockUser()
// Now try and Reserve again if it fails then we skip.
- if _, err := e.stack.ReservePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr, nil /* testPort */); err != nil {
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: p,
+ Flags: e.portFlags,
+ BindToDevice: bindToDevice,
+ Dest: addr,
+ }
+ if _, err := e.stack.ReservePort(portRes, nil /* testPort */); err != nil {
return false, nil
}
}
@@ -2286,7 +2324,16 @@ func (e *endpoint) connect(addr tcpip.FullAddress, handshake bool, run bool) tcp
id := e.ID
id.LocalPort = p
if err := e.stack.RegisterTransportEndpoint(netProtos, ProtocolNumber, id, e, e.portFlags, bindToDevice); err != nil {
- e.stack.ReleasePort(netProtos, ProtocolNumber, e.ID.LocalAddress, p, e.portFlags, bindToDevice, addr)
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: p,
+ Flags: e.portFlags,
+ BindToDevice: bindToDevice,
+ Dest: addr,
+ }
+ e.stack.ReleasePort(portRes)
if _, ok := err.(*tcpip.ErrPortInUse); ok {
return false, nil
}
@@ -2602,7 +2649,16 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
}
bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())
- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, addr.Addr, addr.Port, e.portFlags, bindToDevice, tcpip.FullAddress{}, func(p uint16) bool {
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: addr.Addr,
+ Port: addr.Port,
+ Flags: e.portFlags,
+ BindToDevice: bindToDevice,
+ Dest: tcpip.FullAddress{},
+ }
+ port, err := e.stack.ReservePort(portRes, func(p uint16) (bool, tcpip.Error) {
id := e.ID
id.LocalPort = p
// CheckRegisterTransportEndpoint should only return an error if there is a
@@ -2614,9 +2670,9 @@ func (e *endpoint) bindLocked(addr tcpip.FullAddress) (err tcpip.Error) {
// address/port. Hence this will only return an error if there is a matching
// listening endpoint.
if err := e.stack.CheckRegisterTransportEndpoint(netProtos, ProtocolNumber, id, e.portFlags, bindToDevice); err != nil {
- return false
+ return false, nil
}
- return true
+ return true, nil
})
if err != nil {
return err
@@ -2699,7 +2755,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p
Cause: transErr,
// Linux passes the payload with the TCP header. We don't know if the TCP
// header even exists, it may not for fragmented packets.
- Payload: pkt.Data.ToView(),
+ Payload: pkt.Data().AsRange().ToOwnedView(),
Dst: tcpip.FullAddress{
NIC: pkt.NICID,
Addr: e.ID.RemoteAddress,
diff --git a/pkg/tcpip/transport/tcp/endpoint_state.go b/pkg/tcpip/transport/tcp/endpoint_state.go
index e4368026f..a53d76917 100644
--- a/pkg/tcpip/transport/tcp/endpoint_state.go
+++ b/pkg/tcpip/transport/tcp/endpoint_state.go
@@ -22,9 +22,11 @@ import (
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
+ "gvisor.dev/gvisor/pkg/tcpip/ports"
"gvisor.dev/gvisor/pkg/tcpip/stack"
)
+// +checklocks:e.mu
func (e *endpoint) drainSegmentLocked() {
// Drain only up to once.
if e.drainDone != nil {
@@ -207,7 +209,16 @@ func (e *endpoint) Resume(s *stack.Stack) {
if err != nil {
panic("unable to parse BindAddr: " + err.String())
}
- if ok := e.stack.ReserveTuple(e.effectiveNetProtos, ProtocolNumber, addr.Addr, addr.Port, e.boundPortFlags, e.boundBindToDevice, e.boundDest); !ok {
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: addr.Addr,
+ Port: addr.Port,
+ Flags: e.boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: e.boundDest,
+ }
+ if ok := e.stack.ReserveTuple(portRes); !ok {
panic(fmt.Sprintf("unable to re-reserve tuple (%v, %q, %d, %+v, %d, %v)", e.effectiveNetProtos, addr.Addr, addr.Port, e.boundPortFlags, e.boundBindToDevice, e.boundDest))
}
e.isPortReserved = true
diff --git a/pkg/tcpip/transport/tcp/protocol.go b/pkg/tcpip/transport/tcp/protocol.go
index 04012cd40..2a4667906 100644
--- a/pkg/tcpip/transport/tcp/protocol.go
+++ b/pkg/tcpip/transport/tcp/protocol.go
@@ -226,7 +226,7 @@ func replyWithReset(stack *stack.Stack, s *segment, tos, ttl uint8) tcpip.Error
// Get the seqnum from the packet if the ack flag is set.
seq := seqnum.Value(0)
ack := seqnum.Value(0)
- flags := byte(header.TCPFlagRst)
+ flags := header.TCPFlagRst
// As per RFC 793 page 35 (Reset Generation)
// 1. If the connection does not exist (CLOSED) then a reset is sent
// in response to any incoming segment except another reset. In
diff --git a/pkg/tcpip/transport/tcp/segment.go b/pkg/tcpip/transport/tcp/segment.go
index f27eef6a9..8edd6775b 100644
--- a/pkg/tcpip/transport/tcp/segment.go
+++ b/pkg/tcpip/transport/tcp/segment.go
@@ -62,7 +62,7 @@ type segment struct {
views [8]buffer.View `state:"nosave"`
sequenceNumber seqnum.Value
ackNumber seqnum.Value
- flags uint8
+ flags header.TCPFlags
window seqnum.Size
// csum is only populated for received segments.
csum uint16
@@ -98,7 +98,7 @@ func newIncomingSegment(id stack.TransportEndpointID, pkt *stack.PacketBuffer) *
netProto: pkt.NetworkProtocolNumber,
nicID: pkt.NICID,
}
- s.data = pkt.Data.Clone(s.views[:])
+ s.data = pkt.Data().ExtractVV().Clone(s.views[:])
s.hdr = header.TCP(pkt.TransportHeader().View())
s.rcvdTime = time.Now()
s.dataMemSize = s.data.Size()
@@ -141,12 +141,12 @@ func (s *segment) clone() *segment {
}
// flagIsSet checks if at least one flag in flags is set in s.flags.
-func (s *segment) flagIsSet(flags uint8) bool {
+func (s *segment) flagIsSet(flags header.TCPFlags) bool {
return s.flags&flags != 0
}
// flagsAreSet checks if all flags in flags are set in s.flags.
-func (s *segment) flagsAreSet(flags uint8) bool {
+func (s *segment) flagsAreSet(flags header.TCPFlags) bool {
return s.flags&flags == flags
}
diff --git a/pkg/tcpip/transport/tcp/snd.go b/pkg/tcpip/transport/tcp/snd.go
index 83c8deb0e..18817029d 100644
--- a/pkg/tcpip/transport/tcp/snd.go
+++ b/pkg/tcpip/transport/tcp/snd.go
@@ -1613,7 +1613,7 @@ func (s *sender) sendSegment(seg *segment) tcpip.Error {
// sendSegmentFromView sends a new segment containing the given payload, flags
// and sequence number.
-func (s *sender) sendSegmentFromView(data buffer.VectorisedView, flags byte, seq seqnum.Value) tcpip.Error {
+func (s *sender) sendSegmentFromView(data buffer.VectorisedView, flags header.TCPFlags, seq seqnum.Value) tcpip.Error {
s.lastSendTime = time.Now()
if seq == s.rttMeasureSeqNum {
s.rttMeasureTime = s.lastSendTime
diff --git a/pkg/tcpip/transport/tcp/tcp_test.go b/pkg/tcpip/transport/tcp/tcp_test.go
index 0128c1f7e..fd499a47b 100644
--- a/pkg/tcpip/transport/tcp/tcp_test.go
+++ b/pkg/tcpip/transport/tcp/tcp_test.go
@@ -33,7 +33,6 @@ import (
"gvisor.dev/gvisor/pkg/tcpip/link/sniffer"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
- "gvisor.dev/gvisor/pkg/tcpip/ports"
"gvisor.dev/gvisor/pkg/tcpip/seqnum"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
@@ -1373,7 +1372,7 @@ func TestTOSV4(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790), // Acknum is initial sequence number + 1
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
checker.TOS(tos, 0),
)
@@ -1421,7 +1420,7 @@ func TestTrafficClassV6(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
checker.TOS(tos, 0),
)
@@ -2202,7 +2201,7 @@ func TestSimpleSend(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2242,7 +2241,7 @@ func TestZeroWindowSend(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2264,7 +2263,7 @@ func TestZeroWindowSend(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2311,7 +2310,7 @@ func TestScaledWindowConnect(t *testing.T) {
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(0x5fff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
}
@@ -2342,7 +2341,7 @@ func TestNonScaledWindowConnect(t *testing.T) {
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(0xffff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
}
@@ -2415,7 +2414,7 @@ func TestScaledWindowAccept(t *testing.T) {
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(0x5fff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
}
@@ -2488,7 +2487,7 @@ func TestNonScaledWindowAccept(t *testing.T) {
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(0xffff),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
}
@@ -2666,7 +2665,7 @@ func TestSegmentMerging(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+uint32(i)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
}
@@ -2689,7 +2688,7 @@ func TestSegmentMerging(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+11),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2738,7 +2737,7 @@ func TestDelay(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(seq)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2786,7 +2785,7 @@ func TestUndelay(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(seq)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2809,7 +2808,7 @@ func TestUndelay(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(seq)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2872,7 +2871,7 @@ func TestMSSNotDelayed(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(seq)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -2923,7 +2922,7 @@ func testBrokenUpWrite(t *testing.T, c *context.Context, maxPayload int) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1+uint32(bytesReceived)),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -3438,7 +3437,7 @@ func TestMaxRTO(t *testing.T) {
checker.IPv4(t, c.GetPacket(),
checker.TCP(
checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
const numRetransmits = 2
@@ -3447,7 +3446,7 @@ func TestMaxRTO(t *testing.T) {
checker.IPv4(t, c.GetPacket(),
checker.TCP(
checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
if time.Since(start).Round(time.Second).Seconds() != rto.Seconds() {
@@ -3490,7 +3489,7 @@ func TestRetransmitIPv4IDUniqueness(t *testing.T) {
checker.FragmentFlags(0),
checker.TCP(
checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
idSet := map[uint16]struct{}{header.IPv4(pkt).ID(): {}}
@@ -3502,7 +3501,7 @@ func TestRetransmitIPv4IDUniqueness(t *testing.T) {
checker.FragmentFlags(0),
checker.TCP(
checker.DstPort(context.TestPort),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
id := header.IPv4(pkt).ID()
@@ -3633,7 +3632,7 @@ func TestFinWithNoPendingData(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -3710,7 +3709,7 @@ func TestFinWithPendingDataCwndFull(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -3729,7 +3728,7 @@ func TestFinWithPendingDataCwndFull(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -3796,7 +3795,7 @@ func TestFinWithPendingData(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -3822,7 +3821,7 @@ func TestFinWithPendingData(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -3886,7 +3885,7 @@ func TestFinWithPartialAck(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -3907,7 +3906,7 @@ func TestFinWithPartialAck(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(791),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -3923,7 +3922,7 @@ func TestFinWithPartialAck(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(791),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
next += uint32(len(view))
@@ -4033,7 +4032,7 @@ func scaledSendWindow(t *testing.T, scale uint8) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -4783,7 +4782,8 @@ func TestConnectAvoidsBoundPorts(t *testing.T) {
t.Fatalf("unknown address type: '%s'", candidateAddressType)
}
- for i := ports.FirstEphemeral; i <= math.MaxUint16; i++ {
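+ // Walk the stack's configured port range rather than assuming the default
+ // ephemeral range.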
+ start, end := s.PortRange()
+ for i := start; i <= end; i++ {
if makeEP(exhaustedNetwork).Bind(tcpip.FullAddress{Addr: address(t, exhaustedAddressType, isAny), Port: uint16(i)}); err != nil {
t.Fatalf("Bind(%d) failed: %s", i, err)
}
@@ -4844,7 +4844,7 @@ func TestPathMTUDiscovery(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(seqNum),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
seqNum += uint32(size)
@@ -5129,7 +5129,7 @@ func TestKeepalive(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -7174,7 +7174,7 @@ func TestTCPCloseWithData(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(uint32(iss)+2), // Acknum is initial sequence number + 1
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -7274,7 +7274,7 @@ func TestTCPUserTimeout(t *testing.T) {
checker.DstPort(context.TestPort),
checker.TCPSeqNum(next),
checker.TCPAckNum(790),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
diff --git a/pkg/tcpip/transport/tcp/tcp_timestamp_test.go b/pkg/tcpip/transport/tcp/tcp_timestamp_test.go
index 5a9745ad7..cb4f82903 100644
--- a/pkg/tcpip/transport/tcp/tcp_timestamp_test.go
+++ b/pkg/tcpip/transport/tcp/tcp_timestamp_test.go
@@ -170,7 +170,7 @@ func timeStampEnabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wndS
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(wndSize),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
checker.TCPTimestampChecker(true, 0, tsVal+1),
),
)
@@ -231,7 +231,7 @@ func timeStampDisabledAccept(t *testing.T, cookieEnabled bool, wndScale int, wnd
checker.TCPSeqNum(uint32(c.IRS)+1),
checker.TCPAckNum(790),
checker.TCPWindow(wndSize),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
checker.TCPTimestampChecker(false, 0, 0),
),
)
diff --git a/pkg/tcpip/transport/tcp/testing/context/context.go b/pkg/tcpip/transport/tcp/testing/context/context.go
index b1cb9a324..2f1c1011d 100644
--- a/pkg/tcpip/transport/tcp/testing/context/context.go
+++ b/pkg/tcpip/transport/tcp/testing/context/context.go
@@ -101,7 +101,7 @@ type Headers struct {
AckNum seqnum.Value
// Flags are the TCP flags in the TCP header.
- Flags int
+ Flags header.TCPFlags
// RcvWnd is the window to be advertised in the ReceiveWindow field of
// the TCP header.
@@ -452,7 +452,7 @@ func (c *Context) BuildSegmentWithAddrs(payload []byte, h *Headers, src, dst tcp
SeqNum: uint32(h.SeqNum),
AckNum: uint32(h.AckNum),
DataOffset: uint8(header.TCPMinimumSize + len(h.TCPOpts)),
- Flags: uint8(h.Flags),
+ Flags: h.Flags,
WindowSize: uint16(h.RcvWnd),
})
@@ -544,7 +544,7 @@ func (c *Context) ReceiveAndCheckPacketWithOptions(data []byte, offset, size, op
checker.DstPort(TestPort),
checker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),
checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -571,7 +571,7 @@ func (c *Context) ReceiveNonBlockingAndCheckPacket(data []byte, offset, size int
checker.DstPort(TestPort),
checker.TCPSeqNum(uint32(c.IRS.Add(seqnum.Size(1+offset)))),
checker.TCPAckNum(uint32(seqnum.Value(TestInitialSequenceNumber).Add(1))),
- checker.TCPFlagsMatch(header.TCPFlagAck, ^uint8(header.TCPFlagPsh)),
+ checker.TCPFlagsMatch(header.TCPFlagAck, ^header.TCPFlagPsh),
),
)
@@ -650,7 +650,7 @@ func (c *Context) SendV6PacketWithAddrs(payload []byte, h *Headers, src, dst tcp
SeqNum: uint32(h.SeqNum),
AckNum: uint32(h.AckNum),
DataOffset: header.TCPMinimumSize,
- Flags: uint8(h.Flags),
+ Flags: h.Flags,
WindowSize: uint16(h.RcvWnd),
})
@@ -780,7 +780,7 @@ type RawEndpoint struct {
C *Context
SrcPort uint16
DstPort uint16
- Flags int
+ Flags header.TCPFlags
NextSeqNum seqnum.Value
AckNum seqnum.Value
WndSize seqnum.Size
diff --git a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go
index 5e271b7ca..6c5ddc3c7 100644
--- a/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go
+++ b/pkg/tcpip/transport/tcpconntrack/tcp_conntrack_test.go
@@ -465,7 +465,7 @@ func TestIgnoreBadResetOnSynSent(t *testing.T) {
// Receive a RST with a bad ACK, it should not cause the connection to
// be reset.
acks := []uint32{1234, 1236, 1000, 5000}
- flags := []uint8{header.TCPFlagRst, header.TCPFlagRst | header.TCPFlagAck}
+ flags := []header.TCPFlags{header.TCPFlagRst, header.TCPFlagRst | header.TCPFlagAck}
for _, a := range acks {
for _, f := range flags {
tcp.Encode(&header.TCPFields{
diff --git a/pkg/tcpip/transport/udp/endpoint.go b/pkg/tcpip/transport/udp/endpoint.go
index 807df2bb5..c0f566459 100644
--- a/pkg/tcpip/transport/udp/endpoint.go
+++ b/pkg/tcpip/transport/udp/endpoint.go
@@ -245,7 +245,16 @@ func (e *endpoint) Close() {
switch e.EndpointState() {
case StateBound, StateConnected:
e.stack.UnregisterTransportEndpoint(e.effectiveNetProtos, ProtocolNumber, e.ID, e, e.boundPortFlags, e.boundBindToDevice)
- e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, e.boundPortFlags, e.boundBindToDevice, tcpip.FullAddress{})
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: e.ID.LocalPort,
+ Flags: e.boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: tcpip.FullAddress{},
+ }
+ e.stack.ReleasePort(portRes)
e.boundBindToDevice = 0
e.boundPortFlags = ports.Flags{}
}
@@ -920,7 +929,16 @@ func (e *endpoint) Disconnect() tcpip.Error {
} else {
if e.ID.LocalPort != 0 {
// Release the ephemeral port.
- e.stack.ReleasePort(e.effectiveNetProtos, ProtocolNumber, e.ID.LocalAddress, e.ID.LocalPort, boundPortFlags, e.boundBindToDevice, tcpip.FullAddress{})
+ portRes := ports.Reservation{
+ Networks: e.effectiveNetProtos,
+ Transport: ProtocolNumber,
+ Addr: e.ID.LocalAddress,
+ Port: e.ID.LocalPort,
+ Flags: boundPortFlags,
+ BindToDevice: e.boundBindToDevice,
+ Dest: tcpip.FullAddress{},
+ }
+ e.stack.ReleasePort(portRes)
e.boundPortFlags = ports.Flags{}
}
e.setEndpointState(StateInitial)
@@ -1072,7 +1090,16 @@ func (*endpoint) Accept(*tcpip.FullAddress) (tcpip.Endpoint, *waiter.Queue, tcpi
func (e *endpoint) registerWithStack(netProtos []tcpip.NetworkProtocolNumber, id stack.TransportEndpointID) (stack.TransportEndpointID, tcpip.NICID, tcpip.Error) {
bindToDevice := tcpip.NICID(e.ops.GetBindToDevice())
if e.ID.LocalPort == 0 {
- port, err := e.stack.ReservePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.portFlags, bindToDevice, tcpip.FullAddress{}, nil /* testPort */)
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: id.LocalAddress,
+ Port: id.LocalPort,
+ Flags: e.portFlags,
+ BindToDevice: bindToDevice,
+ Dest: tcpip.FullAddress{},
+ }
+ port, err := e.stack.ReservePort(portRes, nil /* testPort */)
if err != nil {
return id, bindToDevice, err
}
@@ -1082,7 +1109,16 @@ func (e *endpoint) registerWithStack(netProtos []tcpip.NetworkProtocolNumber, id
err := e.stack.RegisterTransportEndpoint(netProtos, ProtocolNumber, id, e, e.boundPortFlags, bindToDevice)
if err != nil {
- e.stack.ReleasePort(netProtos, ProtocolNumber, id.LocalAddress, id.LocalPort, e.boundPortFlags, bindToDevice, tcpip.FullAddress{})
+ portRes := ports.Reservation{
+ Networks: netProtos,
+ Transport: ProtocolNumber,
+ Addr: id.LocalAddress,
+ Port: id.LocalPort,
+ Flags: e.boundPortFlags,
+ BindToDevice: bindToDevice,
+ Dest: tcpip.FullAddress{},
+ }
+ e.stack.ReleasePort(portRes)
e.boundPortFlags = ports.Flags{}
}
return id, bindToDevice, err
@@ -1227,7 +1263,7 @@ func verifyChecksum(hdr header.UDP, pkt *stack.PacketBuffer) bool {
(hdr.Checksum() != 0 || pkt.NetworkProtocolNumber == header.IPv6ProtocolNumber) {
netHdr := pkt.Network()
xsum := header.PseudoHeaderChecksum(ProtocolNumber, netHdr.DestinationAddress(), netHdr.SourceAddress(), hdr.Length())
- for _, v := range pkt.Data.Views() {
+ for _, v := range pkt.Data().Views() {
xsum = header.Checksum(v, xsum)
}
return hdr.CalculateChecksum(xsum) == 0xffff
@@ -1240,7 +1276,7 @@ func verifyChecksum(hdr header.UDP, pkt *stack.PacketBuffer) bool {
func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) {
// Get the header then trim it from the view.
hdr := header.UDP(pkt.TransportHeader().View())
- if int(hdr.Length()) > pkt.Data.Size()+header.UDPMinimumSize {
+ if int(hdr.Length()) > pkt.Data().Size()+header.UDPMinimumSize {
// Malformed packet.
e.stack.Stats().UDP.MalformedPacketsReceived.Increment()
e.stats.ReceiveErrors.MalformedPacketsReceived.Increment()
@@ -1287,10 +1323,10 @@ func (e *endpoint) HandlePacket(id stack.TransportEndpointID, pkt *stack.PacketB
Addr: id.LocalAddress,
Port: header.UDP(hdr).DestinationPort(),
},
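+ // Pull the payload out of the packet buffer and into the queued packet.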
+ data: pkt.Data().ExtractVV(),
}
- packet.data = pkt.Data
e.rcvList.PushBack(packet)
- e.rcvBufSize += pkt.Data.Size()
+ e.rcvBufSize += packet.data.Size()
// Save any useful information from the network header to the packet.
switch pkt.NetworkProtocolNumber {
@@ -1327,7 +1363,7 @@ func (e *endpoint) onICMPError(err tcpip.Error, transErr stack.TransportError, p
if e.SocketOptions().GetRecvError() {
// Linux passes the payload without the UDP header.
var payload []byte
- udp := header.UDP(pkt.Data.ToView())
+ udp := header.UDP(pkt.Data().AsRange().ToOwnedView())
if len(udp) >= header.UDPMinimumSize {
payload = udp.Payload()
}
diff --git a/pkg/tcpip/transport/udp/protocol.go b/pkg/tcpip/transport/udp/protocol.go
index 427fdd0c9..1171aeb79 100644
--- a/pkg/tcpip/transport/udp/protocol.go
+++ b/pkg/tcpip/transport/udp/protocol.go
@@ -80,7 +80,7 @@ func (*protocol) ParsePorts(v buffer.View) (src, dst uint16, err tcpip.Error) {
// protocol but don't match any existing endpoint.
func (p *protocol) HandleUnknownDestinationPacket(id stack.TransportEndpointID, pkt *stack.PacketBuffer) stack.UnknownDestinationPacketDisposition {
hdr := header.UDP(pkt.TransportHeader().View())
- if int(hdr.Length()) > pkt.Data.Size()+header.UDPMinimumSize {
+ if int(hdr.Length()) > pkt.Data().Size()+header.UDPMinimumSize {
p.stack.Stats().UDP.MalformedPacketsReceived.Increment()
return stack.UnknownDestinationPacketMalformed
}
diff --git a/runsc/boot/compat.go b/runsc/boot/compat.go
index a3a76b609..28e82e117 100644
--- a/runsc/boot/compat.go
+++ b/runsc/boot/compat.go
@@ -17,8 +17,8 @@ package boot
import (
"fmt"
"os"
- "syscall"
+ "golang.org/x/sys/unix"
"google.golang.org/protobuf/proto"
"gvisor.dev/gvisor/pkg/eventchannel"
"gvisor.dev/gvisor/pkg/log"
@@ -93,19 +93,19 @@ func (c *compatEmitter) emitUnimplementedSyscall(us *spb.UnimplementedSyscall) {
tr := c.trackers[sysnr]
if tr == nil {
switch sysnr {
- case syscall.SYS_PRCTL:
+ case unix.SYS_PRCTL:
// args: cmd, ...
tr = newArgsTracker(0)
- case syscall.SYS_IOCTL, syscall.SYS_EPOLL_CTL, syscall.SYS_SHMCTL, syscall.SYS_FUTEX, syscall.SYS_FALLOCATE:
+ case unix.SYS_IOCTL, unix.SYS_EPOLL_CTL, unix.SYS_SHMCTL, unix.SYS_FUTEX, unix.SYS_FALLOCATE:
// args: fd/addr, cmd, ...
tr = newArgsTracker(1)
- case syscall.SYS_GETSOCKOPT, syscall.SYS_SETSOCKOPT:
+ case unix.SYS_GETSOCKOPT, unix.SYS_SETSOCKOPT:
// args: fd, level, name, ...
tr = newArgsTracker(1, 2)
- case syscall.SYS_SEMCTL:
+ case unix.SYS_SEMCTL:
// args: semid, semnum, cmd, ...
tr = newArgsTracker(2)
@@ -131,7 +131,7 @@ func (c *compatEmitter) emitUnimplementedSyscall(us *spb.UnimplementedSyscall) {
}
func (c *compatEmitter) emitUncaughtSignal(msg *ucspb.UncaughtSignal) {
- sig := syscall.Signal(msg.SignalNumber)
+ sig := unix.Signal(msg.SignalNumber)
c.sink.Infof(
"Uncaught signal: %q (%d), PID: %d, TID: %d, fault addr: %#x",
sig, msg.SignalNumber, msg.Pid, msg.Tid, msg.FaultAddr)
diff --git a/runsc/boot/compat_amd64.go b/runsc/boot/compat_amd64.go
index 8eb76b2ba..7e13ff87c 100644
--- a/runsc/boot/compat_amd64.go
+++ b/runsc/boot/compat_amd64.go
@@ -16,8 +16,8 @@ package boot
import (
"fmt"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi"
"gvisor.dev/gvisor/pkg/sentry/arch"
rpb "gvisor.dev/gvisor/pkg/sentry/arch/registers_go_proto"
@@ -92,7 +92,7 @@ func syscallNum(regs *rpb.Registers) uint64 {
func newArchArgsTracker(sysnr uint64) syscallTracker {
switch sysnr {
- case syscall.SYS_ARCH_PRCTL:
+ case unix.SYS_ARCH_PRCTL:
// args: cmd, ...
return newArgsTracker(0)
}
diff --git a/runsc/boot/controller.go b/runsc/boot/controller.go
index 5e849cb37..1cd5fba5c 100644
--- a/runsc/boot/controller.go
+++ b/runsc/boot/controller.go
@@ -18,9 +18,9 @@ import (
"errors"
"fmt"
"os"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/control/server"
"gvisor.dev/gvisor/pkg/fd"
"gvisor.dev/gvisor/pkg/log"
@@ -366,7 +366,7 @@ func (cm *containerManager) Restore(o *RestoreOpts, _ *struct{}) error {
case 2:
// The device file is donated to the platform.
// Can't take ownership away from os.File. dup them to get a new FD.
- fd, err := syscall.Dup(int(o.Files[1].Fd()))
+ fd, err := unix.Dup(int(o.Files[1].Fd()))
if err != nil {
return fmt.Errorf("failed to dup file: %v", err)
}
diff --git a/runsc/boot/filter/config.go b/runsc/boot/filter/config.go
index 2a8c916d5..49b503f99 100644
--- a/runsc/boot/filter/config.go
+++ b/runsc/boot/filter/config.go
@@ -16,7 +16,6 @@ package filter
import (
"os"
- "syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -26,19 +25,19 @@ import (
// allowedSyscalls is the set of syscalls executed by the Sentry to the host OS.
var allowedSyscalls = seccomp.SyscallRules{
- syscall.SYS_CLOCK_GETTIME: {},
- syscall.SYS_CLOSE: {},
- syscall.SYS_DUP: {},
- syscall.SYS_DUP3: []seccomp.Rule{
+ unix.SYS_CLOCK_GETTIME: {},
+ unix.SYS_CLOSE: {},
+ unix.SYS_DUP: {},
+ unix.SYS_DUP3: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.O_CLOEXEC),
+ seccomp.EqualTo(unix.O_CLOEXEC),
},
},
- syscall.SYS_EPOLL_CREATE1: {},
- syscall.SYS_EPOLL_CTL: {},
- syscall.SYS_EPOLL_PWAIT: []seccomp.Rule{
+ unix.SYS_EPOLL_CREATE1: {},
+ unix.SYS_EPOLL_CTL: {},
+ unix.SYS_EPOLL_PWAIT: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
@@ -47,34 +46,34 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(0),
},
},
- syscall.SYS_EVENTFD2: []seccomp.Rule{
+ unix.SYS_EVENTFD2: []seccomp.Rule{
{
seccomp.EqualTo(0),
seccomp.EqualTo(0),
},
},
- syscall.SYS_EXIT: {},
- syscall.SYS_EXIT_GROUP: {},
- syscall.SYS_FALLOCATE: {},
- syscall.SYS_FCHMOD: {},
- syscall.SYS_FCNTL: []seccomp.Rule{
+ unix.SYS_EXIT: {},
+ unix.SYS_EXIT_GROUP: {},
+ unix.SYS_FALLOCATE: {},
+ unix.SYS_FCHMOD: {},
+ unix.SYS_FCNTL: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_GETFL),
+ seccomp.EqualTo(unix.F_GETFL),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_SETFL),
+ seccomp.EqualTo(unix.F_SETFL),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_GETFD),
+ seccomp.EqualTo(unix.F_GETFD),
},
},
- syscall.SYS_FSTAT: {},
- syscall.SYS_FSYNC: {},
- syscall.SYS_FTRUNCATE: {},
- syscall.SYS_FUTEX: []seccomp.Rule{
+ unix.SYS_FSTAT: {},
+ unix.SYS_FSYNC: {},
+ unix.SYS_FTRUNCATE: {},
+ unix.SYS_FUTEX: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.EqualTo(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),
@@ -109,35 +108,35 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(0),
},
},
- syscall.SYS_GETPID: {},
+ unix.SYS_GETPID: {},
unix.SYS_GETRANDOM: {},
- syscall.SYS_GETSOCKOPT: []seccomp.Rule{
+ unix.SYS_GETSOCKOPT: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_DOMAIN),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_DOMAIN),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_TYPE),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_TYPE),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_ERROR),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_ERROR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_SNDBUF),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_SNDBUF),
},
},
- syscall.SYS_GETTID: {},
- syscall.SYS_GETTIMEOFDAY: {},
+ unix.SYS_GETTID: {},
+ unix.SYS_GETTIMEOFDAY: {},
// SYS_IOCTL is needed for terminal support, but we only allow
// setting/getting termios and winsize.
- syscall.SYS_IOCTL: []seccomp.Rule{
+ unix.SYS_IOCTL: []seccomp.Rule{
{
seccomp.MatchAny{}, /* fd */
seccomp.EqualTo(linux.TCGETS),
@@ -169,94 +168,94 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.MatchAny{}, /* winsize struct */
},
},
- syscall.SYS_LSEEK: {},
- syscall.SYS_MADVISE: {},
+ unix.SYS_LSEEK: {},
+ unix.SYS_MADVISE: {},
unix.SYS_MEMBARRIER: []seccomp.Rule{
{
seccomp.EqualTo(linux.MEMBARRIER_CMD_GLOBAL),
seccomp.EqualTo(0),
},
},
- syscall.SYS_MINCORE: {},
+ unix.SYS_MINCORE: {},
// Used by the Go runtime as a temporary workaround for a Linux
// 5.2-5.4 bug.
//
// See src/runtime/os_linux_x86.go.
//
// TODO(b/148688965): Remove once this is gone from Go.
- syscall.SYS_MLOCK: []seccomp.Rule{
+ unix.SYS_MLOCK: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.EqualTo(4096),
},
},
- syscall.SYS_MMAP: []seccomp.Rule{
+ unix.SYS_MMAP: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_SHARED),
+ seccomp.EqualTo(unix.MAP_SHARED),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE),
+ seccomp.EqualTo(unix.MAP_PRIVATE),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_STACK),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS | unix.MAP_STACK),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_NORESERVE),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS | unix.MAP_NORESERVE),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.PROT_WRITE | syscall.PROT_READ),
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),
+ seccomp.EqualTo(unix.PROT_WRITE | unix.PROT_READ),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS | unix.MAP_FIXED),
},
},
- syscall.SYS_MPROTECT: {},
- syscall.SYS_MUNMAP: {},
- syscall.SYS_NANOSLEEP: {},
- syscall.SYS_PPOLL: {},
- syscall.SYS_PREAD64: {},
- syscall.SYS_PREADV: {},
- unix.SYS_PREADV2: {},
- syscall.SYS_PWRITE64: {},
- syscall.SYS_PWRITEV: {},
- unix.SYS_PWRITEV2: {},
- syscall.SYS_READ: {},
- syscall.SYS_RECVMSG: []seccomp.Rule{
+ unix.SYS_MPROTECT: {},
+ unix.SYS_MUNMAP: {},
+ unix.SYS_NANOSLEEP: {},
+ unix.SYS_PPOLL: {},
+ unix.SYS_PREAD64: {},
+ unix.SYS_PREADV: {},
+ unix.SYS_PREADV2: {},
+ unix.SYS_PWRITE64: {},
+ unix.SYS_PWRITEV: {},
+ unix.SYS_PWRITEV2: {},
+ unix.SYS_READ: {},
+ unix.SYS_RECVMSG: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_TRUNC),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_TRUNC | unix.MSG_PEEK),
},
},
- syscall.SYS_RECVMMSG: []seccomp.Rule{
+ unix.SYS_RECVMMSG: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.EqualTo(fdbased.MaxMsgsPerRecv),
- seccomp.EqualTo(syscall.MSG_DONTWAIT),
+ seccomp.EqualTo(unix.MSG_DONTWAIT),
seccomp.EqualTo(0),
},
},
@@ -265,34 +264,34 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT),
+ seccomp.EqualTo(unix.MSG_DONTWAIT),
seccomp.EqualTo(0),
},
},
- syscall.SYS_RESTART_SYSCALL: {},
- syscall.SYS_RT_SIGACTION: {},
- syscall.SYS_RT_SIGPROCMASK: {},
- syscall.SYS_RT_SIGRETURN: {},
- syscall.SYS_SCHED_YIELD: {},
- syscall.SYS_SENDMSG: []seccomp.Rule{
+ unix.SYS_RESTART_SYSCALL: {},
+ unix.SYS_RT_SIGACTION: {},
+ unix.SYS_RT_SIGPROCMASK: {},
+ unix.SYS_RT_SIGRETURN: {},
+ unix.SYS_SCHED_YIELD: {},
+ unix.SYS_SENDMSG: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_NOSIGNAL),
},
},
- syscall.SYS_SETITIMER: {},
- syscall.SYS_SHUTDOWN: []seccomp.Rule{
+ unix.SYS_SETITIMER: {},
+ unix.SYS_SHUTDOWN: []seccomp.Rule{
// Used by fs/host to shutdown host sockets.
- {seccomp.MatchAny{}, seccomp.EqualTo(syscall.SHUT_RD)},
- {seccomp.MatchAny{}, seccomp.EqualTo(syscall.SHUT_WR)},
+ {seccomp.MatchAny{}, seccomp.EqualTo(unix.SHUT_RD)},
+ {seccomp.MatchAny{}, seccomp.EqualTo(unix.SHUT_WR)},
// Used by unet to shutdown connections.
- {seccomp.MatchAny{}, seccomp.EqualTo(syscall.SHUT_RDWR)},
+ {seccomp.MatchAny{}, seccomp.EqualTo(unix.SHUT_RDWR)},
},
- syscall.SYS_SIGALTSTACK: {},
- unix.SYS_STATX: {},
- syscall.SYS_SYNC_FILE_RANGE: {},
- syscall.SYS_TEE: []seccomp.Rule{
+ unix.SYS_SIGALTSTACK: {},
+ unix.SYS_STATX: {},
+ unix.SYS_SYNC_FILE_RANGE: {},
+ unix.SYS_TEE: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
@@ -300,12 +299,12 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(unix.SPLICE_F_NONBLOCK), /* flags */
},
},
- syscall.SYS_TGKILL: []seccomp.Rule{
+ unix.SYS_TGKILL: []seccomp.Rule{
{
seccomp.EqualTo(uint64(os.Getpid())),
},
},
- syscall.SYS_UTIMENSAT: []seccomp.Rule{
+ unix.SYS_UTIMENSAT: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.EqualTo(0), /* null pathname */
@@ -313,9 +312,9 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(0), /* flags */
},
},
- syscall.SYS_WRITE: {},
+ unix.SYS_WRITE: {},
// For rawfile.NonBlockingWriteIovec.
- syscall.SYS_WRITEV: []seccomp.Rule{
+ unix.SYS_WRITEV: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
@@ -327,313 +326,313 @@ var allowedSyscalls = seccomp.SyscallRules{
// hostInetFilters contains syscalls that are needed by sentry/socket/hostinet.
func hostInetFilters() seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_ACCEPT4: []seccomp.Rule{
+ unix.SYS_ACCEPT4: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.SOCK_NONBLOCK | unix.SOCK_CLOEXEC),
},
},
- syscall.SYS_BIND: {},
- syscall.SYS_CONNECT: {},
- syscall.SYS_GETPEERNAME: {},
- syscall.SYS_GETSOCKNAME: {},
- syscall.SYS_GETSOCKOPT: []seccomp.Rule{
+ unix.SYS_BIND: {},
+ unix.SYS_CONNECT: {},
+ unix.SYS_GETPEERNAME: {},
+ unix.SYS_GETSOCKNAME: {},
+ unix.SYS_GETSOCKOPT: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_TOS),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_TOS),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVTOS),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVTOS),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_PKTINFO),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_PKTINFO),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVORIGDSTADDR),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVORIGDSTADDR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVERR),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVERR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_TCLASS),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_TCLASS),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_RECVTCLASS),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_RECVTCLASS),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_RECVERR),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_RECVERR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_V6ONLY),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_V6ONLY),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
+ seccomp.EqualTo(unix.SOL_IPV6),
seccomp.EqualTo(linux.IPV6_RECVORIGDSTADDR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_ERROR),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_ERROR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_KEEPALIVE),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_KEEPALIVE),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_SNDBUF),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_SNDBUF),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_RCVBUF),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_RCVBUF),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_REUSEADDR),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_REUSEADDR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_TYPE),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_TYPE),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_LINGER),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_LINGER),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_TIMESTAMP),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_TIMESTAMP),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_TCP),
- seccomp.EqualTo(syscall.TCP_NODELAY),
+ seccomp.EqualTo(unix.SOL_TCP),
+ seccomp.EqualTo(unix.TCP_NODELAY),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_TCP),
- seccomp.EqualTo(syscall.TCP_INFO),
+ seccomp.EqualTo(unix.SOL_TCP),
+ seccomp.EqualTo(unix.TCP_INFO),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_TCP),
+ seccomp.EqualTo(unix.SOL_TCP),
seccomp.EqualTo(linux.TCP_INQ),
},
},
- syscall.SYS_IOCTL: []seccomp.Rule{
+ unix.SYS_IOCTL: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.TIOCOUTQ),
+ seccomp.EqualTo(unix.TIOCOUTQ),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.TIOCINQ),
+ seccomp.EqualTo(unix.TIOCINQ),
},
},
- syscall.SYS_LISTEN: {},
- syscall.SYS_READV: {},
- syscall.SYS_RECVFROM: {},
- syscall.SYS_RECVMSG: {},
- syscall.SYS_SENDMSG: {},
- syscall.SYS_SENDTO: {},
- syscall.SYS_SETSOCKOPT: []seccomp.Rule{
+ unix.SYS_LISTEN: {},
+ unix.SYS_READV: {},
+ unix.SYS_RECVFROM: {},
+ unix.SYS_RECVMSG: {},
+ unix.SYS_SENDMSG: {},
+ unix.SYS_SENDTO: {},
+ unix.SYS_SETSOCKOPT: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_SNDBUF),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_SNDBUF),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_RCVBUF),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_RCVBUF),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_REUSEADDR),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_REUSEADDR),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_TIMESTAMP),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_TIMESTAMP),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_TCP),
- seccomp.EqualTo(syscall.TCP_NODELAY),
+ seccomp.EqualTo(unix.SOL_TCP),
+ seccomp.EqualTo(unix.TCP_NODELAY),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_TCP),
+ seccomp.EqualTo(unix.SOL_TCP),
seccomp.EqualTo(linux.TCP_INQ),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_TOS),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_TOS),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVTOS),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVTOS),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_PKTINFO),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_PKTINFO),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVORIGDSTADDR),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVORIGDSTADDR),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IP),
- seccomp.EqualTo(syscall.IP_RECVERR),
+ seccomp.EqualTo(unix.SOL_IP),
+ seccomp.EqualTo(unix.IP_RECVERR),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_TCLASS),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_TCLASS),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_RECVTCLASS),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_RECVTCLASS),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
+ seccomp.EqualTo(unix.SOL_IPV6),
seccomp.EqualTo(linux.IPV6_RECVORIGDSTADDR),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_RECVERR),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_RECVERR),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_IPV6),
- seccomp.EqualTo(syscall.IPV6_V6ONLY),
+ seccomp.EqualTo(unix.SOL_IPV6),
+ seccomp.EqualTo(unix.IPV6_V6ONLY),
seccomp.MatchAny{},
seccomp.EqualTo(4),
},
},
- syscall.SYS_SHUTDOWN: []seccomp.Rule{
+ unix.SYS_SHUTDOWN: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SHUT_RD),
+ seccomp.EqualTo(unix.SHUT_RD),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SHUT_WR),
+ seccomp.EqualTo(unix.SHUT_WR),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SHUT_RDWR),
+ seccomp.EqualTo(unix.SHUT_RDWR),
},
},
- syscall.SYS_SOCKET: []seccomp.Rule{
+ unix.SYS_SOCKET: []seccomp.Rule{
{
- seccomp.EqualTo(syscall.AF_INET),
- seccomp.EqualTo(syscall.SOCK_STREAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.AF_INET),
+ seccomp.EqualTo(unix.SOCK_STREAM | unix.SOCK_NONBLOCK | unix.SOCK_CLOEXEC),
seccomp.EqualTo(0),
},
{
- seccomp.EqualTo(syscall.AF_INET),
- seccomp.EqualTo(syscall.SOCK_DGRAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.AF_INET),
+ seccomp.EqualTo(unix.SOCK_DGRAM | unix.SOCK_NONBLOCK | unix.SOCK_CLOEXEC),
seccomp.EqualTo(0),
},
{
- seccomp.EqualTo(syscall.AF_INET6),
- seccomp.EqualTo(syscall.SOCK_STREAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.AF_INET6),
+ seccomp.EqualTo(unix.SOCK_STREAM | unix.SOCK_NONBLOCK | unix.SOCK_CLOEXEC),
seccomp.EqualTo(0),
},
{
- seccomp.EqualTo(syscall.AF_INET6),
- seccomp.EqualTo(syscall.SOCK_DGRAM | syscall.SOCK_NONBLOCK | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.AF_INET6),
+ seccomp.EqualTo(unix.SOCK_DGRAM | unix.SOCK_NONBLOCK | unix.SOCK_CLOEXEC),
seccomp.EqualTo(0),
},
},
- syscall.SYS_WRITEV: {},
+ unix.SYS_WRITEV: {},
}
}
func controlServerFilters(fd int) seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_ACCEPT: []seccomp.Rule{
+ unix.SYS_ACCEPT: []seccomp.Rule{
{
seccomp.EqualTo(fd),
},
},
- syscall.SYS_LISTEN: []seccomp.Rule{
+ unix.SYS_LISTEN: []seccomp.Rule{
{
seccomp.EqualTo(fd),
seccomp.EqualTo(16 /* unet.backlog */),
},
},
- syscall.SYS_GETSOCKOPT: []seccomp.Rule{
+ unix.SYS_GETSOCKOPT: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.SOL_SOCKET),
- seccomp.EqualTo(syscall.SO_PEERCRED),
+ seccomp.EqualTo(unix.SOL_SOCKET),
+ seccomp.EqualTo(unix.SO_PEERCRED),
},
},
}
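The allowlists above are plain Go maps from syscall number to per-argument rules, so the syscall-to-unix migration only changes which package supplies the constants. A minimal sketch of the same pattern, assuming the gVisor seccomp API used in config.go (the package layout and the exampleRules name are hypothetical, not part of the patch):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"

	"gvisor.dev/gvisor/pkg/seccomp"
)

// exampleRules allows write(2) unconditionally and dup3(2) only when its
// flags argument equals O_CLOEXEC, mirroring the style of allowedSyscalls.
var exampleRules = seccomp.SyscallRules{
	unix.SYS_WRITE: {},
	unix.SYS_DUP3: []seccomp.Rule{
		{
			seccomp.MatchAny{},              // oldfd
			seccomp.MatchAny{},              // newfd
			seccomp.EqualTo(unix.O_CLOEXEC), // flags
		},
	},
}

func main() {
	fmt.Println("rules defined for", len(exampleRules), "syscalls")
}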
diff --git a/runsc/boot/filter/config_amd64.go b/runsc/boot/filter/config_amd64.go
index cea5613b8..42cb8ed3a 100644
--- a/runsc/boot/filter/config_amd64.go
+++ b/runsc/boot/filter/config_amd64.go
@@ -17,30 +17,29 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/seccomp"
)
func init() {
- allowedSyscalls[syscall.SYS_ARCH_PRCTL] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_ARCH_PRCTL] = []seccomp.Rule{
// TODO(b/168828518): No longer used in Go 1.16+.
{seccomp.EqualTo(linux.ARCH_SET_FS)},
}
- allowedSyscalls[syscall.SYS_CLONE] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_CLONE] = []seccomp.Rule{
// parent_tidptr and child_tidptr are always 0 because neither
// CLONE_PARENT_SETTID nor CLONE_CHILD_SETTID are used.
{
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SETTLS |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SETTLS |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
seccomp.EqualTo(0), // parent_tidptr
seccomp.EqualTo(0), // child_tidptr
@@ -49,12 +48,12 @@ func init() {
{
// TODO(b/168828518): No longer used in Go 1.16+ (on amd64).
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
seccomp.EqualTo(0), // parent_tidptr
seccomp.EqualTo(0), // child_tidptr
diff --git a/runsc/boot/filter/config_arm64.go b/runsc/boot/filter/config_arm64.go
index 37313f97f..f162f87ff 100644
--- a/runsc/boot/filter/config_arm64.go
+++ b/runsc/boot/filter/config_arm64.go
@@ -17,21 +17,20 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
func init() {
- allowedSyscalls[syscall.SYS_CLONE] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_CLONE] = []seccomp.Rule{
{
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
// These arguments are left uninitialized by the Go
// runtime, so they may be anything (and are unused by
diff --git a/runsc/boot/filter/config_profile.go b/runsc/boot/filter/config_profile.go
index 7b8669595..89b66a6da 100644
--- a/runsc/boot/filter/config_profile.go
+++ b/runsc/boot/filter/config_profile.go
@@ -15,19 +15,18 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
// profileFilters returns extra syscalls made by runtime/pprof package.
func profileFilters() seccomp.SyscallRules {
return seccomp.SyscallRules{
- syscall.SYS_OPENAT: []seccomp.Rule{
+ unix.SYS_OPENAT: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.O_RDONLY | syscall.O_LARGEFILE | syscall.O_CLOEXEC),
+ seccomp.EqualTo(unix.O_RDONLY | unix.O_LARGEFILE | unix.O_CLOEXEC),
},
},
}
diff --git a/runsc/boot/filter/extra_filters_msan.go b/runsc/boot/filter/extra_filters_msan.go
index 209e646a7..41baa78cd 100644
--- a/runsc/boot/filter/extra_filters_msan.go
+++ b/runsc/boot/filter/extra_filters_msan.go
@@ -17,8 +17,7 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
@@ -26,9 +25,9 @@ import (
func instrumentationFilters() seccomp.SyscallRules {
Report("MSAN is enabled: syscall filters less restrictive!")
return seccomp.SyscallRules{
- syscall.SYS_CLONE: {},
- syscall.SYS_MMAP: {},
- syscall.SYS_SCHED_GETAFFINITY: {},
- syscall.SYS_SET_ROBUST_LIST: {},
+ unix.SYS_CLONE: {},
+ unix.SYS_MMAP: {},
+ unix.SYS_SCHED_GETAFFINITY: {},
+ unix.SYS_SET_ROBUST_LIST: {},
}
}
diff --git a/runsc/boot/filter/extra_filters_race.go b/runsc/boot/filter/extra_filters_race.go
index 5b99eb8cd..79b2104f0 100644
--- a/runsc/boot/filter/extra_filters_race.go
+++ b/runsc/boot/filter/extra_filters_race.go
@@ -17,8 +17,7 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
@@ -26,17 +25,17 @@ import (
func instrumentationFilters() seccomp.SyscallRules {
Report("TSAN is enabled: syscall filters less restrictive!")
return seccomp.SyscallRules{
- syscall.SYS_BRK: {},
- syscall.SYS_CLOCK_NANOSLEEP: {},
- syscall.SYS_CLONE: {},
- syscall.SYS_FUTEX: {},
- syscall.SYS_MMAP: {},
- syscall.SYS_MUNLOCK: {},
- syscall.SYS_NANOSLEEP: {},
- syscall.SYS_OPEN: {},
- syscall.SYS_OPENAT: {},
- syscall.SYS_SET_ROBUST_LIST: {},
+ unix.SYS_BRK: {},
+ unix.SYS_CLOCK_NANOSLEEP: {},
+ unix.SYS_CLONE: {},
+ unix.SYS_FUTEX: {},
+ unix.SYS_MMAP: {},
+ unix.SYS_MUNLOCK: {},
+ unix.SYS_NANOSLEEP: {},
+ unix.SYS_OPEN: {},
+ unix.SYS_OPENAT: {},
+ unix.SYS_SET_ROBUST_LIST: {},
// Used within glibc's malloc.
- syscall.SYS_TIME: {},
+ unix.SYS_TIME: {},
}
}
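Across all of these filter files the swap is behavior-preserving: on Linux, golang.org/x/sys/unix defines the SYS_* numbers and flag constants with the same values as the syscall package, and shared types such as Errno are type aliases of their syscall counterparts. A small illustrative check, not part of the patch:

package main

import (
	"fmt"
	"syscall"

	"golang.org/x/sys/unix"
)

func main() {
	// On linux/amd64 all three print true: the numeric constants are
	// identical, and unix.Errno aliases syscall.Errno, so the errno
	// comparison compiles and matches.
	fmt.Println(syscall.SYS_CLONE == unix.SYS_CLONE)
	fmt.Println(syscall.O_CLOEXEC == unix.O_CLOEXEC)
	fmt.Println(syscall.EINTR == unix.EINTR)
}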
diff --git a/runsc/boot/fs.go b/runsc/boot/fs.go
index 2b0d2cd51..77f632bb9 100644
--- a/runsc/boot/fs.go
+++ b/runsc/boot/fs.go
@@ -20,9 +20,9 @@ import (
"sort"
"strconv"
"strings"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/context"
"gvisor.dev/gvisor/pkg/fd"
@@ -312,11 +312,11 @@ func setupContainerFS(ctx context.Context, conf *config.Config, mntr *containerM
}
func adjustDirentCache(k *kernel.Kernel) error {
- var hl syscall.Rlimit
- if err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &hl); err != nil {
+ var hl unix.Rlimit
+ if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &hl); err != nil {
return fmt.Errorf("getting RLIMIT_NOFILE: %v", err)
}
- if int64(hl.Cur) != syscall.RLIM_INFINITY {
+ if hl.Cur != unix.RLIM_INFINITY {
newSize := hl.Cur / 2
if newSize < gofer.DefaultDirentCacheSize {
log.Infof("Setting gofer dirent cache size to %d", newSize)
@@ -844,10 +844,10 @@ func (c *containerMounter) mountSubmount(ctx context.Context, conf *config.Confi
// than simply printed to the logs for the 'runsc boot' command.
//
// We check the error message string rather than type because the
- // actual error types (syscall.EIO, syscall.EPIPE) are lost by file system
+ // actual error types (unix.EIO, unix.EPIPE) are lost by file system
// implementation (e.g. p9).
// TODO(gvisor.dev/issue/1765): Remove message when bug is resolved.
- if strings.Contains(err.Error(), syscall.EIO.Error()) || strings.Contains(err.Error(), syscall.EPIPE.Error()) {
+ if strings.Contains(err.Error(), unix.EIO.Error()) || strings.Contains(err.Error(), unix.EPIPE.Error()) {
return fmt.Errorf("%v: %s", err, specutils.FaqErrorMsg("memlock", "you may be encountering a Linux kernel bug"))
}
return err
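The adjustDirentCache change above also drops the int64 cast: syscall.RLIM_INFINITY is defined as -1, while the unix package defines it as an unsigned constant, so Rlimit.Cur can be compared directly. A self-contained sketch of the same probe (illustrative only):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var hl unix.Rlimit
	if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &hl); err != nil {
		fmt.Println("getting RLIMIT_NOFILE:", err)
		return
	}
	if hl.Cur != unix.RLIM_INFINITY {
		// Keep half of the FD budget for the rest of the sandbox, as
		// adjustDirentCache does when sizing the gofer dirent cache.
		fmt.Println("would cap dirent cache at", hl.Cur/2)
	}
}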
diff --git a/runsc/boot/limits.go b/runsc/boot/limits.go
index ce62236e5..3d2b3506d 100644
--- a/runsc/boot/limits.go
+++ b/runsc/boot/limits.go
@@ -16,9 +16,9 @@ package boot
import (
"fmt"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/limits"
"gvisor.dev/gvisor/pkg/sync"
@@ -104,9 +104,9 @@ func (d *defs) initDefaults() error {
// Read host limits that directly affect the sandbox and adjust the defaults
// based on them.
- for _, res := range []int{syscall.RLIMIT_FSIZE, syscall.RLIMIT_NOFILE} {
- var hl syscall.Rlimit
- if err := syscall.Getrlimit(res, &hl); err != nil {
+ for _, res := range []int{unix.RLIMIT_FSIZE, unix.RLIMIT_NOFILE} {
+ var hl unix.Rlimit
+ if err := unix.Getrlimit(res, &hl); err != nil {
return err
}
diff --git a/runsc/boot/loader_test.go b/runsc/boot/loader_test.go
index b77b4762e..3121ca6eb 100644
--- a/runsc/boot/loader_test.go
+++ b/runsc/boot/loader_test.go
@@ -19,7 +19,6 @@ import (
"math/rand"
"os"
"reflect"
- "syscall"
"testing"
"time"
@@ -78,7 +77,7 @@ func testSpec() *specs.Spec {
// sandbox side of the connection, and a function that when called will stop the
// gofer.
func startGofer(root string) (int, func(), error) {
- fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
+ fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
if err != nil {
return 0, nil, err
}
@@ -86,8 +85,8 @@ func startGofer(root string) (int, func(), error) {
socket, err := unet.NewSocket(goferEnd)
if err != nil {
- syscall.Close(sandboxEnd)
- syscall.Close(goferEnd)
+ unix.Close(sandboxEnd)
+ unix.Close(goferEnd)
return 0, nil, fmt.Errorf("error creating server on FD %d: %v", goferEnd, err)
}
at, err := fsgofer.NewAttachPoint(root, fsgofer.Config{ROMount: true})
diff --git a/runsc/boot/network.go b/runsc/boot/network.go
index 3d3a813df..7e627e4c6 100644
--- a/runsc/boot/network.go
+++ b/runsc/boot/network.go
@@ -19,8 +19,8 @@ import (
"net"
"runtime"
"strings"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/link/fdbased"
@@ -195,7 +195,7 @@ func (n *Network) CreateLinksAndRoutes(args *CreateLinksAndRoutesArgs, _ *struct
for j := 0; j < link.NumChannels; j++ {
// Copy the underlying FD.
oldFD := args.FilePayload.Files[fdOffset].Fd()
- newFD, err := syscall.Dup(int(oldFD))
+ newFD, err := unix.Dup(int(oldFD))
if err != nil {
return fmt.Errorf("failed to dup FD %v: %v", oldFD, err)
}
diff --git a/runsc/cgroup/cgroup.go b/runsc/cgroup/cgroup.go
index ac9e4e3a8..438b7ef3e 100644
--- a/runsc/cgroup/cgroup.go
+++ b/runsc/cgroup/cgroup.go
@@ -27,7 +27,6 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
"github.com/cenkalti/backoff"
@@ -111,7 +110,7 @@ func setValue(path, name, data string) error {
err := ioutil.WriteFile(fullpath, []byte(data), 0700)
if err == nil {
return nil
- } else if !errors.Is(err, syscall.EINTR) {
+ } else if !errors.Is(err, unix.EINTR) {
return err
}
}
@@ -161,7 +160,7 @@ func fillFromAncestor(path string) (string, error) {
err := ioutil.WriteFile(path, []byte(val), 0700)
if err == nil {
break
- } else if !errors.Is(err, syscall.EINTR) {
+ } else if !errors.Is(err, unix.EINTR) {
return "", err
}
}
@@ -337,7 +336,7 @@ func (c *Cgroup) Install(res *specs.LinuxResources) error {
c.Own[key] = true
if err := os.MkdirAll(path, 0755); err != nil {
- if cfg.optional && errors.Is(err, syscall.EROFS) {
+ if cfg.optional && errors.Is(err, unix.EROFS) {
log.Infof("Skipping cgroup %q", key)
continue
}
@@ -370,7 +369,7 @@ func (c *Cgroup) Uninstall() error {
defer cancel()
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
fn := func() error {
- err := syscall.Rmdir(path)
+ err := unix.Rmdir(path)
if os.IsNotExist(err) {
return nil
}
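The cgroup writes above retry on EINTR because ioutil.WriteFile can be interrupted by a signal; errors.Is unwraps the *os.PathError it returns down to the underlying errno. A minimal sketch of that retry loop (the path and value are made up):

package main

import (
	"errors"
	"fmt"
	"io/ioutil"

	"golang.org/x/sys/unix"
)

// writeRetryingOnEINTR mirrors the setValue/fillFromAncestor loops above:
// retry the write until it either succeeds or fails with a real error.
func writeRetryingOnEINTR(path, data string) error {
	for {
		err := ioutil.WriteFile(path, []byte(data), 0700)
		if err == nil {
			return nil
		} else if !errors.Is(err, unix.EINTR) {
			return err
		}
		// Interrupted by a signal; try again.
	}
}

func main() {
	fmt.Println(writeRetryingOnEINTR("/tmp/example-cgroup-value", "100000"))
}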
diff --git a/runsc/cli/BUILD b/runsc/cli/BUILD
index 32cce2a18..f1e3cce68 100644
--- a/runsc/cli/BUILD
+++ b/runsc/cli/BUILD
@@ -18,5 +18,6 @@ go_library(
"//runsc/flag",
"//runsc/specutils",
"@com_github_google_subcommands//:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/runsc/cli/main.go b/runsc/cli/main.go
index bf6928941..a3c515f4b 100644
--- a/runsc/cli/main.go
+++ b/runsc/cli/main.go
@@ -23,10 +23,10 @@ import (
"os"
"os/signal"
"runtime"
- "syscall"
"time"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/refs"
"gvisor.dev/gvisor/pkg/sentry/platform"
@@ -198,7 +198,7 @@ func Main(version string) {
// want with them. Since Docker and Containerd both eat boot's stderr, we
// dup our stderr to the provided log FD so that panics will appear in the
// logs, rather than just disappear.
- if err := syscall.Dup3(fd, int(os.Stderr.Fd()), 0); err != nil {
+ if err := unix.Dup3(fd, int(os.Stderr.Fd()), 0); err != nil {
cmd.Fatalf("error dup'ing fd %d to stderr: %v", fd, err)
}
} else if conf.AlsoLogToStderr {
@@ -227,11 +227,11 @@ func Main(version string) {
// SIGTERM is sent to all processes if a test exceeds its
// timeout and this case is handled by syscall_test_runner.
log.Warningf("Block the TERM signal. This is only safe in tests!")
- signal.Ignore(syscall.SIGTERM)
+ signal.Ignore(unix.SIGTERM)
}
// Call the subcommand and pass in the configuration.
- var ws syscall.WaitStatus
+ var ws unix.WaitStatus
subcmdCode := subcommands.Execute(context.Background(), conf, &ws)
if subcmdCode == subcommands.ExitSuccess {
log.Infof("Exiting with status: %v", ws)
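The Dup3 call above is what keeps panics visible when Docker or containerd consume boot's stderr: the donated log FD is duplicated over FD 2. A hypothetical, self-contained sketch of the same redirection (the log path is made up):

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// Stand-in for the log FD handed to `runsc boot` by its parent.
	logFile, err := os.Create("/tmp/example-boot.log")
	if err != nil {
		fmt.Println(err)
		return
	}
	// After this, anything written to stderr (including panics) lands in
	// the log file instead of the FD the container runtime swallowed.
	if err := unix.Dup3(int(logFile.Fd()), int(os.Stderr.Fd()), 0); err != nil {
		fmt.Printf("error dup'ing fd %d to stderr: %v\n", logFile.Fd(), err)
	}
}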
diff --git a/runsc/cmd/BUILD b/runsc/cmd/BUILD
index e3e289da3..2c3b4058b 100644
--- a/runsc/cmd/BUILD
+++ b/runsc/cmd/BUILD
@@ -77,6 +77,7 @@ go_test(
"delete_test.go",
"exec_test.go",
"gofer_test.go",
+ "mitigate_test.go",
],
data = [
"//runsc",
@@ -91,6 +92,8 @@ go_test(
"//pkg/urpc",
"//runsc/config",
"//runsc/container",
+ "//runsc/mitigate",
+ "//runsc/mitigate/mock",
"//runsc/specutils",
"@com_github_google_go_cmp//cmp:go_default_library",
"@com_github_google_go_cmp//cmp/cmpopts:go_default_library",
diff --git a/runsc/cmd/boot.go b/runsc/cmd/boot.go
index 2c92e3067..a14249641 100644
--- a/runsc/cmd/boot.go
+++ b/runsc/cmd/boot.go
@@ -19,7 +19,6 @@ import (
"os"
"runtime/debug"
"strings"
- "syscall"
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -259,8 +258,8 @@ func (b *Boot) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
ws := l.WaitExit()
log.Infof("application exiting with %+v", ws)
- waitStatus := args[1].(*syscall.WaitStatus)
- *waitStatus = syscall.WaitStatus(ws.Status())
+ waitStatus := args[1].(*unix.WaitStatus)
+ *waitStatus = unix.WaitStatus(ws.Status())
l.Destroy()
return subcommands.ExitSuccess
}
diff --git a/runsc/cmd/checkpoint.go b/runsc/cmd/checkpoint.go
index 124198239..a9dbe86de 100644
--- a/runsc/cmd/checkpoint.go
+++ b/runsc/cmd/checkpoint.go
@@ -18,9 +18,9 @@ import (
"context"
"os"
"path/filepath"
- "syscall"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
@@ -73,7 +73,7 @@ func (c *Checkpoint) Execute(_ context.Context, f *flag.FlagSet, args ...interfa
id := f.Arg(0)
conf := args[0].(*config.Config)
- waitStatus := args[1].(*syscall.WaitStatus)
+ waitStatus := args[1].(*unix.WaitStatus)
cont, err := container.Load(conf.RootDir, container.FullID{ContainerID: id}, container.LoadOpts{})
if err != nil {
diff --git a/runsc/cmd/chroot.go b/runsc/cmd/chroot.go
index 189244765..e988247da 100644
--- a/runsc/cmd/chroot.go
+++ b/runsc/cmd/chroot.go
@@ -18,8 +18,8 @@ import (
"fmt"
"os"
"path/filepath"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -49,11 +49,11 @@ func pivotRoot(root string) error {
// will be moved to "/" too. The parent mount of the old_root will be
// new_root, so after umounting the old_root, we will see only
// the new_root in "/".
- if err := syscall.PivotRoot(".", "."); err != nil {
+ if err := unix.PivotRoot(".", "."); err != nil {
return fmt.Errorf("pivot_root failed, make sure that the root mount has a parent: %v", err)
}
- if err := syscall.Unmount(".", syscall.MNT_DETACH); err != nil {
+ if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
return fmt.Errorf("error umounting the old root file system: %v", err)
}
return nil
@@ -70,26 +70,26 @@ func setUpChroot(pidns bool) error {
// Convert all shared mounts into slave to be sure that nothing will be
// propagated outside of our namespace.
- if err := syscall.Mount("", "/", "", syscall.MS_SLAVE|syscall.MS_REC, ""); err != nil {
+ if err := unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
return fmt.Errorf("error converting mounts: %v", err)
}
- if err := syscall.Mount("runsc-root", chroot, "tmpfs", syscall.MS_NOSUID|syscall.MS_NODEV|syscall.MS_NOEXEC, ""); err != nil {
+ if err := unix.Mount("runsc-root", chroot, "tmpfs", unix.MS_NOSUID|unix.MS_NODEV|unix.MS_NOEXEC, ""); err != nil {
return fmt.Errorf("error mounting tmpfs in choot: %v", err)
}
if pidns {
- flags := uint32(syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC | syscall.MS_RDONLY)
+ flags := uint32(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC | unix.MS_RDONLY)
if err := mountInChroot(chroot, "proc", "/proc", "proc", flags); err != nil {
return fmt.Errorf("error mounting proc in chroot: %v", err)
}
} else {
- if err := mountInChroot(chroot, "/proc", "/proc", "bind", syscall.MS_BIND|syscall.MS_RDONLY|syscall.MS_REC); err != nil {
+ if err := mountInChroot(chroot, "/proc", "/proc", "bind", unix.MS_BIND|unix.MS_RDONLY|unix.MS_REC); err != nil {
return fmt.Errorf("error mounting proc in chroot: %v", err)
}
}
- if err := syscall.Mount("", chroot, "", syscall.MS_REMOUNT|syscall.MS_RDONLY|syscall.MS_BIND, ""); err != nil {
+ if err := unix.Mount("", chroot, "", unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_BIND, ""); err != nil {
return fmt.Errorf("error remounting chroot in read-only: %v", err)
}
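pivotRoot above relies on the pivot_root(".", ".") idiom: when new_root and put_old are both the working directory, the old root ends up stacked beneath the new one and can simply be detached. A sketch of just that sequence (it needs CAP_SYS_ADMIN and its own mount namespace to actually succeed):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func pivotIntoCwd() error {
	// Both arguments are ".": the old root is moved under the new root.
	if err := unix.PivotRoot(".", "."); err != nil {
		return fmt.Errorf("pivot_root failed, make sure that the root mount has a parent: %v", err)
	}
	// Lazily unmount the old root that is now stacked underneath "/".
	if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
		return fmt.Errorf("error umounting the old root file system: %v", err)
	}
	return nil
}

func main() {
	fmt.Println(pivotIntoCwd())
}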
diff --git a/runsc/cmd/cmd.go b/runsc/cmd/cmd.go
index f1a4887ef..4dd55cc33 100644
--- a/runsc/cmd/cmd.go
+++ b/runsc/cmd/cmd.go
@@ -19,9 +19,9 @@ import (
"fmt"
"runtime"
"strconv"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/runsc/specutils"
)
@@ -71,7 +71,7 @@ func setCapsAndCallSelf(args []string, caps *specs.LinuxCapabilities) error {
binPath := specutils.ExePath
log.Infof("Execve %q again, bye!", binPath)
- err := syscall.Exec(binPath, args, []string{})
+ err := unix.Exec(binPath, args, []string{})
return fmt.Errorf("error executing %s: %v", binPath, err)
}
@@ -83,16 +83,16 @@ func callSelfAsNobody(args []string) error {
const nobody = 65534
- if _, _, err := syscall.RawSyscall(syscall.SYS_SETGID, uintptr(nobody), 0, 0); err != 0 {
+ if _, _, err := unix.RawSyscall(unix.SYS_SETGID, uintptr(nobody), 0, 0); err != 0 {
return fmt.Errorf("error setting uid: %v", err)
}
- if _, _, err := syscall.RawSyscall(syscall.SYS_SETUID, uintptr(nobody), 0, 0); err != 0 {
+ if _, _, err := unix.RawSyscall(unix.SYS_SETUID, uintptr(nobody), 0, 0); err != 0 {
return fmt.Errorf("error setting gid: %v", err)
}
binPath := specutils.ExePath
log.Infof("Execve %q again, bye!", binPath)
- err := syscall.Exec(binPath, args, []string{})
+ err := unix.Exec(binPath, args, []string{})
return fmt.Errorf("error executing %s: %v", binPath, err)
}
diff --git a/runsc/cmd/debug.go b/runsc/cmd/debug.go
index b84142b0d..6212ffb2e 100644
--- a/runsc/cmd/debug.go
+++ b/runsc/cmd/debug.go
@@ -21,10 +21,10 @@ import (
"strconv"
"strings"
"sync"
- "syscall"
"time"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/runsc/config"
@@ -135,7 +135,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Perform synchronous actions.
if d.signal > 0 {
log.Infof("Sending signal %d to process: %d", d.signal, c.Sandbox.Pid)
- if err := syscall.Kill(c.Sandbox.Pid, syscall.Signal(d.signal)); err != nil {
+ if err := unix.Kill(c.Sandbox.Pid, unix.Signal(d.signal)); err != nil {
return Errorf("failed to send signal %d to processs %d", d.signal, c.Sandbox.Pid)
}
}
@@ -317,7 +317,7 @@ func (d *Debug) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
wg.Wait()
}()
signals := make(chan os.Signal, 1)
- signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
+ signal.Notify(signals, unix.SIGTERM, unix.SIGINT)
select {
case <-readyChan:
break // Safe to proceed.
diff --git a/runsc/cmd/do.go b/runsc/cmd/do.go
index 8a8d9f752..22c1dfeb8 100644
--- a/runsc/cmd/do.go
+++ b/runsc/cmd/do.go
@@ -26,10 +26,10 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
@@ -86,7 +86,7 @@ func (c *Do) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) su
}
conf := args[0].(*config.Config)
- waitStatus := args[1].(*syscall.WaitStatus)
+ waitStatus := args[1].(*unix.WaitStatus)
if conf.Rootless {
if err := specutils.MaybeRunAsRoot(); err != nil {
@@ -225,7 +225,7 @@ func resolvePath(path string) (string, error) {
return "", fmt.Errorf("resolving %q: %v", path, err)
}
path = filepath.Clean(path)
- if err := syscall.Access(path, 0); err != nil {
+ if err := unix.Access(path, 0); err != nil {
return "", fmt.Errorf("unable to access %q: %v", path, err)
}
return path, nil
diff --git a/runsc/cmd/exec.go b/runsc/cmd/exec.go
index e9726401a..242d474b8 100644
--- a/runsc/cmd/exec.go
+++ b/runsc/cmd/exec.go
@@ -24,11 +24,11 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/kernel/auth"
@@ -110,7 +110,7 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
if err != nil {
Fatalf("parsing process spec: %v", err)
}
- waitStatus := args[1].(*syscall.WaitStatus)
+ waitStatus := args[1].(*unix.WaitStatus)
c, err := container.Load(conf.RootDir, container.FullID{ContainerID: id}, container.LoadOpts{})
if err != nil {
@@ -149,7 +149,7 @@ func (ex *Exec) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
return ex.exec(c, e, waitStatus)
}
-func (ex *Exec) exec(c *container.Container, e *control.ExecArgs, waitStatus *syscall.WaitStatus) subcommands.ExitStatus {
+func (ex *Exec) exec(c *container.Container, e *control.ExecArgs, waitStatus *unix.WaitStatus) subcommands.ExitStatus {
// Start the new process and get its pid.
pid, err := c.Execute(e)
if err != nil {
@@ -189,7 +189,7 @@ func (ex *Exec) exec(c *container.Container, e *control.ExecArgs, waitStatus *sy
return subcommands.ExitSuccess
}
-func (ex *Exec) execChildAndWait(waitStatus *syscall.WaitStatus) subcommands.ExitStatus {
+func (ex *Exec) execChildAndWait(waitStatus *unix.WaitStatus) subcommands.ExitStatus {
var args []string
for _, a := range os.Args[1:] {
if !strings.Contains(a, "detach") {
@@ -233,7 +233,7 @@ func (ex *Exec) execChildAndWait(waitStatus *syscall.WaitStatus) subcommands.Exi
cmd.Stdin = tty
cmd.Stdout = tty
cmd.Stderr = tty
- cmd.SysProcAttr = &syscall.SysProcAttr{
+ cmd.SysProcAttr = &unix.SysProcAttr{
Setsid: true,
Setctty: true,
// The Ctty FD must be the FD in the child process's FD
@@ -263,7 +263,7 @@ func (ex *Exec) execChildAndWait(waitStatus *syscall.WaitStatus) subcommands.Exi
}
return pid == cmd.Process.Pid, nil
}
- if pe, ok := err.(*os.PathError); !ok || pe.Err != syscall.ENOENT {
+ if pe, ok := err.(*os.PathError); !ok || pe.Err != unix.ENOENT {
return false, err
}
// No file yet, continue to wait...
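The SysProcAttr assignment in execChildAndWait above compiles unchanged because unix.SysProcAttr is a type alias for syscall.SysProcAttr, the type os/exec expects. A sketch of that detached re-exec setup with the TTY as the child's controlling terminal (names and values are illustrative, not the patch's):

package main

import (
	"os"
	"os/exec"

	"golang.org/x/sys/unix"
)

// detachedCommand wires a TTY to the child's stdio and makes it the
// controlling terminal, as execChildAndWait does for the detached child.
func detachedCommand(tty *os.File, args ...string) *exec.Cmd {
	cmd := exec.Command(args[0], args[1:]...)
	cmd.Stdin = tty
	cmd.Stdout = tty
	cmd.Stderr = tty
	cmd.SysProcAttr = &unix.SysProcAttr{
		Setsid:  true,
		Setctty: true,
		// Ctty is an FD number in the child's FD table; 0 is the stdin
		// slot, which is the TTY set above.
		Ctty: 0,
	}
	return cmd
}

func main() {
	_ = detachedCommand(os.Stdin, "/bin/true")
}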
diff --git a/runsc/cmd/gofer.go b/runsc/cmd/gofer.go
index 371fcc0ae..639b2219c 100644
--- a/runsc/cmd/gofer.go
+++ b/runsc/cmd/gofer.go
@@ -21,7 +21,6 @@ import (
"os"
"path/filepath"
"strings"
- "syscall"
"github.com/google/subcommands"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -149,16 +148,16 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// fsgofer should run with a umask of 0, because we want to preserve file
// modes exactly as sent by the sandbox, which will have applied its own umask.
- syscall.Umask(0)
+ unix.Umask(0)
if err := fsgofer.OpenProcSelfFD(); err != nil {
Fatalf("failed to open /proc/self/fd: %v", err)
}
- if err := syscall.Chroot(root); err != nil {
+ if err := unix.Chroot(root); err != nil {
Fatalf("failed to chroot to %q: %v", root, err)
}
- if err := syscall.Chdir("/"); err != nil {
+ if err := unix.Chdir("/"); err != nil {
Fatalf("changing working dir: %v", err)
}
log.Infof("Process chroot'd to %q", root)
@@ -166,7 +165,8 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
// Start with root mount, then add any other additional mount as needed.
ats := make([]p9.Attacher, 0, len(spec.Mounts)+1)
ap, err := fsgofer.NewAttachPoint("/", fsgofer.Config{
- ROMount: spec.Root.Readonly || conf.Overlay,
+ ROMount: spec.Root.Readonly || conf.Overlay,
+ EnableXattr: conf.Verity,
})
if err != nil {
Fatalf("creating attach point: %v", err)
@@ -178,8 +178,9 @@ func (g *Gofer) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
for _, m := range spec.Mounts {
if specutils.Is9PMount(m) {
cfg := fsgofer.Config{
- ROMount: isReadonlyMount(m.Options) || conf.Overlay,
- HostUDS: conf.FSGoferHostUDS,
+ ROMount: isReadonlyMount(m.Options) || conf.Overlay,
+ HostUDS: conf.FSGoferHostUDS,
+ EnableXattr: conf.Verity,
}
ap, err := fsgofer.NewAttachPoint(m.Destination, cfg)
if err != nil {
@@ -262,7 +263,7 @@ func isReadonlyMount(opts []string) bool {
func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// Convert all shared mounts into slaves to be sure that nothing will be
// propagated outside of our namespace.
- if err := syscall.Mount("", "/", "", syscall.MS_SLAVE|syscall.MS_REC, ""); err != nil {
+ if err := unix.Mount("", "/", "", unix.MS_SLAVE|unix.MS_REC, ""); err != nil {
Fatalf("error converting mounts: %v", err)
}
@@ -274,30 +275,30 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
//
// We need a directory to construct a new root and we know that
// runsc can't start without /proc, so we can use it for this.
- flags := uintptr(syscall.MS_NOSUID | syscall.MS_NODEV | syscall.MS_NOEXEC)
- if err := syscall.Mount("runsc-root", "/proc", "tmpfs", flags, ""); err != nil {
+ flags := uintptr(unix.MS_NOSUID | unix.MS_NODEV | unix.MS_NOEXEC)
+ if err := unix.Mount("runsc-root", "/proc", "tmpfs", flags, ""); err != nil {
Fatalf("error mounting tmpfs: %v", err)
}
// Prepare tree structure for pivot_root(2).
os.Mkdir("/proc/proc", 0755)
os.Mkdir("/proc/root", 0755)
- if err := syscall.Mount("runsc-proc", "/proc/proc", "proc", flags|syscall.MS_RDONLY, ""); err != nil {
+ if err := unix.Mount("runsc-proc", "/proc/proc", "proc", flags|unix.MS_RDONLY, ""); err != nil {
Fatalf("error mounting proc: %v", err)
}
root = "/proc/root"
}
// Mount root path followed by submounts.
- if err := syscall.Mount(spec.Root.Path, root, "bind", syscall.MS_BIND|syscall.MS_REC, ""); err != nil {
+ if err := unix.Mount(spec.Root.Path, root, "bind", unix.MS_BIND|unix.MS_REC, ""); err != nil {
return fmt.Errorf("mounting root on root (%q) err: %v", root, err)
}
- flags := uint32(syscall.MS_SLAVE | syscall.MS_REC)
+ flags := uint32(unix.MS_SLAVE | unix.MS_REC)
if spec.Linux != nil && spec.Linux.RootfsPropagation != "" {
flags = specutils.PropOptionsToFlags([]string{spec.Linux.RootfsPropagation})
}
- if err := syscall.Mount("", root, "", uintptr(flags), ""); err != nil {
+ if err := unix.Mount("", root, "", uintptr(flags), ""); err != nil {
return fmt.Errorf("mounting root (%q) with flags: %#x, err: %v", root, flags, err)
}
@@ -323,8 +324,8 @@ func setupRootFS(spec *specs.Spec, conf *config.Config) error {
// If root is a mount point but not read-only, we can change mount options
// to make it read-only for extra safety.
log.Infof("Remounting root as readonly: %q", root)
- flags := uintptr(syscall.MS_BIND | syscall.MS_REMOUNT | syscall.MS_RDONLY | syscall.MS_REC)
- if err := syscall.Mount(root, root, "bind", flags, ""); err != nil {
+ flags := uintptr(unix.MS_BIND | unix.MS_REMOUNT | unix.MS_RDONLY | unix.MS_REC)
+ if err := unix.Mount(root, root, "bind", flags, ""); err != nil {
return fmt.Errorf("remounting root as read-only with source: %q, target: %q, flags: %#x, err: %v", root, root, flags, err)
}
}
@@ -354,10 +355,10 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
return fmt.Errorf("resolving symlinks to %q: %v", m.Destination, err)
}
- flags := specutils.OptionsToFlags(m.Options) | syscall.MS_BIND
+ flags := specutils.OptionsToFlags(m.Options) | unix.MS_BIND
if conf.Overlay {
// Force mount read-only if writes are not going to be sent to it.
- flags |= syscall.MS_RDONLY
+ flags |= unix.MS_RDONLY
}
log.Infof("Mounting src: %q, dst: %q, flags: %#x", m.Source, dst, flags)
@@ -368,7 +369,7 @@ func setupMounts(conf *config.Config, mounts []specs.Mount, root string) error {
// Set propagation options that cannot be set together with other options.
flags = specutils.PropOptionsToFlags(m.Options)
if flags != 0 {
- if err := syscall.Mount("", dst, "", uintptr(flags), ""); err != nil {
+ if err := unix.Mount("", dst, "", uintptr(flags), ""); err != nil {
return fmt.Errorf("mount dst: %q, flags: %#x, err: %v", dst, flags, err)
}
}
@@ -469,8 +470,8 @@ func adjustMountOptions(conf *config.Config, path string, opts []string) ([]stri
copy(rv, opts)
if conf.OverlayfsStaleRead {
- statfs := syscall.Statfs_t{}
- if err := syscall.Statfs(path, &statfs); err != nil {
+ statfs := unix.Statfs_t{}
+ if err := unix.Statfs(path, &statfs); err != nil {
return nil, err
}
if statfs.Type == unix.OVERLAYFS_SUPER_MAGIC {
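setupRootFS above remounts the root read-only as a separate step: a bind mount does not honor MS_RDONLY when it is first created, so the restriction has to be applied afterwards with MS_REMOUNT. A sketch of that remount (the path is illustrative):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func remountReadonly(root string) error {
	flags := uintptr(unix.MS_BIND | unix.MS_REMOUNT | unix.MS_RDONLY | unix.MS_REC)
	if err := unix.Mount(root, root, "bind", flags, ""); err != nil {
		return fmt.Errorf("remounting %q read-only: %v", root, err)
	}
	return nil
}

func main() {
	fmt.Println(remountReadonly("/proc/root"))
}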
diff --git a/runsc/cmd/kill.go b/runsc/cmd/kill.go
index e0df39266..239fc7ac2 100644
--- a/runsc/cmd/kill.go
+++ b/runsc/cmd/kill.go
@@ -19,7 +19,6 @@ import (
"fmt"
"strconv"
"strings"
- "syscall"
"github.com/google/subcommands"
"golang.org/x/sys/unix"
@@ -99,10 +98,10 @@ func (k *Kill) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
return subcommands.ExitSuccess
}
-func parseSignal(s string) (syscall.Signal, error) {
+func parseSignal(s string) (unix.Signal, error) {
n, err := strconv.Atoi(s)
if err == nil {
- sig := syscall.Signal(n)
+ sig := unix.Signal(n)
for _, msig := range signalMap {
if sig == msig {
return sig, nil
@@ -116,7 +115,7 @@ func parseSignal(s string) (syscall.Signal, error) {
return -1, fmt.Errorf("unknown signal %q", s)
}
-var signalMap = map[string]syscall.Signal{
+var signalMap = map[string]unix.Signal{
"ABRT": unix.SIGABRT,
"ALRM": unix.SIGALRM,
"BUS": unix.SIGBUS,
diff --git a/runsc/cmd/mitigate.go b/runsc/cmd/mitigate.go
index 822af1917..fddf0e0dd 100644
--- a/runsc/cmd/mitigate.go
+++ b/runsc/cmd/mitigate.go
@@ -16,6 +16,8 @@ package cmd
import (
"context"
+ "fmt"
+ "io/ioutil"
"github.com/google/subcommands"
"gvisor.dev/gvisor/pkg/log"
@@ -23,9 +25,23 @@ import (
"gvisor.dev/gvisor/runsc/mitigate"
)
+const (
+ // cpuInfo is the path used to parse CPU info.
+ cpuInfo = "/proc/cpuinfo"
+ // allPossibleCPUs is the path used to enable CPUs.
+ allPossibleCPUs = "/sys/devices/system/cpu/possible"
+)
+
// Mitigate implements subcommands.Command for the "mitigate" command.
type Mitigate struct {
- mitigate mitigate.Mitigate
+ // Run the command without changing the underlying system.
+ dryRun bool
+ // Reverse mitigate by turning on all CPU cores.
+ reverse bool
+ // Path to file to read to create CPUSet.
+ path string
+ // Callback to check if a given thread is vulnerable.
+ vulnerable func(other mitigate.Thread) bool
}
// Name implements subcommands.command.name.
@@ -38,14 +54,19 @@ func (*Mitigate) Synopsis() string {
return "mitigate mitigates the underlying system against side channel attacks"
}
-// Usage implements subcommands.Command.Usage.
-func (m *Mitigate) Usage() string {
- return m.mitigate.Usage()
+// Usage implements Usage for cmd.Mitigate.
+func (m Mitigate) Usage() string {
+ return `mitigate [flags]
+
+mitigate mitigates a system against the "MDS" vulnerability by performing a manual shutdown of SMT. The command checks /proc/cpuinfo for CPUs having the MDS vulnerability, and if any are found, shuts down all but one CPU per hyperthread pair via /sys/devices/system/cpu/cpu{N}/online. CPUs can be restored by writing "2" to each file in /sys/devices/system/cpu/cpu{N}/online or performing a system reboot.
+
+The command can be reversed with --reverse, which reads the total number of CPUs from /sys/devices/system/cpu/possible and enables all of them via /sys/devices/system/cpu/cpu{N}/online.
}
-// SetFlags implements subcommands.Command.SetFlags.
+// SetFlags sets flags for the command Mitigate.
func (m *Mitigate) SetFlags(f *flag.FlagSet) {
- m.mitigate.SetFlags(f)
+ f.BoolVar(&m.dryRun, "dryrun", false, "run the command without changing system")
+ f.BoolVar(&m.reverse, "reverse", false, "reverse mitigate by enabling all CPUs")
}
// Execute implements subcommands.Command.Execute.
@@ -55,10 +76,97 @@ func (m *Mitigate) Execute(_ context.Context, f *flag.FlagSet, args ...interface
return subcommands.ExitUsageError
}
- if err := m.mitigate.Execute(); err != nil {
+ m.path = cpuInfo
+ if m.reverse {
+ m.path = allPossibleCPUs
+ }
+
+ m.vulnerable = func(other mitigate.Thread) bool {
+ return other.IsVulnerable()
+ }
+
+ if _, err := m.doExecute(); err != nil {
log.Warningf("Execute failed: %v", err)
return subcommands.ExitFailure
}
return subcommands.ExitSuccess
}
+
+// doExecute executes the mitigate or reverse operation and returns the resulting CPUSet.
+func (m *Mitigate) doExecute() (mitigate.CPUSet, error) {
+ if m.dryRun {
+ log.Infof("Running with DryRun. No cpu settings will be changed.")
+ }
+ if m.reverse {
+ data, err := ioutil.ReadFile(m.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read %s: %v", m.path, err)
+ }
+
+ set, err := m.doReverse(data)
+ if err != nil {
+ return nil, fmt.Errorf("reverse operation failed: %v", err)
+ }
+ return set, nil
+ }
+
+ data, err := ioutil.ReadFile(m.path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read %s: %v", m.path, err)
+ }
+ set, err := m.doMitigate(data)
+ if err != nil {
+ return nil, fmt.Errorf("mitigate operation failed: %v", err)
+ }
+ return set, nil
+}
+
+func (m *Mitigate) doMitigate(data []byte) (mitigate.CPUSet, error) {
+ set, err := mitigate.NewCPUSet(data, m.vulnerable)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Infof("Mitigate found the following CPUs...")
+ log.Infof("%s", set)
+
+ disableList := set.GetShutdownList()
+ log.Infof("Disabling threads on thread pairs.")
+ for _, t := range disableList {
+ log.Infof("Disable thread: %s", t)
+ if m.dryRun {
+ continue
+ }
+ if err := t.Disable(); err != nil {
+ return nil, fmt.Errorf("error disabling thread: %s err: %v", t, err)
+ }
+ }
+ log.Infof("Shutdown successful.")
+ return set, nil
+}
+
+func (m *Mitigate) doReverse(data []byte) (mitigate.CPUSet, error) {
+ set, err := mitigate.NewCPUSetFromPossible(data)
+ if err != nil {
+ return nil, err
+ }
+
+ log.Infof("Reverse mitigate found the following CPUs...")
+ log.Infof("%s", set)
+
+ enableList := set.GetRemainingList()
+
+ log.Infof("Enabling all CPUs...")
+ for _, t := range enableList {
+ log.Infof("Enabling thread: %s", t)
+ if m.dryRun {
+ continue
+ }
+ if err := t.Enable(); err != nil {
+ return nil, fmt.Errorf("error enabling thread: %s err: %v", t, err)
+ }
+ }
+ log.Infof("Enable successful.")
+ return set, nil
+}
diff --git a/runsc/cmd/mitigate_test.go b/runsc/cmd/mitigate_test.go
new file mode 100644
index 000000000..163fece42
--- /dev/null
+++ b/runsc/cmd/mitigate_test.go
@@ -0,0 +1,169 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cmd
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+
+ "gvisor.dev/gvisor/runsc/mitigate"
+ "gvisor.dev/gvisor/runsc/mitigate/mock"
+)
+
+type executeTestCase struct {
+ name string
+ mitigateData string
+ mitigateError error
+ mitigateCPU int
+ reverseData string
+ reverseError error
+ reverseCPU int
+}
+
+func TestExecute(t *testing.T) {
+
+ partial := `processor : 1
+vendor_id : AuthenticAMD
+cpu family : 23
+model : 49
+model name : AMD EPYC 7B12
+physical id : 0
+bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
+power management:
+`
+
+ for _, tc := range []executeTestCase{
+ {
+ name: "CascadeLake4",
+ mitigateData: mock.CascadeLake4.MakeCPUString(),
+ mitigateCPU: 2,
+ reverseData: mock.CascadeLake4.MakeSysPossibleString(),
+ reverseCPU: 4,
+ },
+ {
+ name: "Empty",
+ mitigateData: "",
+ mitigateError: fmt.Errorf(`mitigate operation failed: no cpus found for: ""`),
+ reverseData: "",
+ reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from possible: ""`),
+ },
+ {
+ name: "Partial",
+ mitigateData: `processor : 0
+vendor_id : AuthenticAMD
+cpu family : 23
+model : 49
+model name : AMD EPYC 7B12
+physical id : 0
+core id : 0
+cpu cores : 1
+bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
+power management::84
+
+` + partial,
+ mitigateError: fmt.Errorf(`mitigate operation failed: failed to match key "core id": %q`, partial),
+ reverseData: "1-",
+ reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from possible: %q`, "1-"),
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ m := &Mitigate{
+ dryRun: true,
+ vulnerable: func(other mitigate.Thread) bool {
+ return other.IsVulnerable()
+ },
+ }
+ m.doExecuteTest(t, "Mitigate", tc.mitigateData, tc.mitigateCPU, tc.mitigateError)
+
+ m.reverse = true
+ m.doExecuteTest(t, "Reverse", tc.reverseData, tc.reverseCPU, tc.reverseError)
+ })
+ }
+}
+
+func TestExecuteSmoke(t *testing.T) {
+ smokeMitigate, err := ioutil.ReadFile(cpuInfo)
+ if err != nil {
+ t.Fatalf("Failed to read %s: %v", cpuInfo, err)
+ }
+
+ m := &Mitigate{
+ dryRun: true,
+ vulnerable: func(other mitigate.Thread) bool {
+ return other.IsVulnerable()
+ },
+ }
+
+ m.doExecuteTest(t, "Mitigate", string(smokeMitigate), 0, nil)
+
+ smokeReverse, err := ioutil.ReadFile(allPossibleCPUs)
+ if err != nil {
+ t.Fatalf("Failed to read %s: %v", allPossibleCPUs, err)
+ }
+
+ m.reverse = true
+ m.doExecuteTest(t, "Reverse", string(smokeReverse), 0, nil)
+}
+
+// doExecuteTest runs doExecute for a single mitigate or reverse operation against the given data and checks the result.
+func (m *Mitigate) doExecuteTest(t *testing.T, name, data string, want int, wantErr error) {
+ t.Run(name, func(t *testing.T) {
+ file, err := ioutil.TempFile("", "outfile.txt")
+ if err != nil {
+ t.Fatalf("Failed to create tmpfile: %v", err)
+ }
+ defer os.Remove(file.Name())
+
+ if _, err := file.WriteString(data); err != nil {
+ t.Fatalf("Failed to write to file: %v", err)
+ }
+
+ // Point the operation at the temp file to keep the test hermetic.
+ m.path = file.Name()
+
+ set, err := m.doExecute()
+ if err = checkErr(wantErr, err); err != nil {
+ t.Fatalf("Mitigate error mismatch: %v", err)
+ }
+
+ // Skip the remaining checks if the test expects an error or if we
+ // don't care how many CPUs are returned.
+ if wantErr != nil || want < 1 {
+ return
+ }
+ got := len(set.GetRemainingList())
+ if want != got {
+ t.Fatalf("Failed wrong number of remaining CPUs: want %d, got %d", want, got)
+ }
+
+ })
+}
+
+// checkErr compares the want and got errors for equality.
+func checkErr(want, got error) error {
+ switch {
+ case want == nil && got == nil:
+ case want != nil && got == nil:
+ fallthrough
+ case want == nil && got != nil:
+ fallthrough
+ case want.Error() != strings.Trim(got.Error(), " "):
+ return fmt.Errorf("got: %v want: %v", got, want)
+ }
+ return nil
+}
diff --git a/runsc/cmd/restore.go b/runsc/cmd/restore.go
index 096ec814c..b21f05921 100644
--- a/runsc/cmd/restore.go
+++ b/runsc/cmd/restore.go
@@ -17,9 +17,9 @@ package cmd
import (
"context"
"path/filepath"
- "syscall"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
@@ -78,7 +78,7 @@ func (r *Restore) Execute(_ context.Context, f *flag.FlagSet, args ...interface{
id := f.Arg(0)
conf := args[0].(*config.Config)
- waitStatus := args[1].(*syscall.WaitStatus)
+ waitStatus := args[1].(*unix.WaitStatus)
if conf.Rootless {
return Errorf("Rootless mode not supported with %q", r.Name())
diff --git a/runsc/cmd/run.go b/runsc/cmd/run.go
index c48cbe4cd..722181aff 100644
--- a/runsc/cmd/run.go
+++ b/runsc/cmd/run.go
@@ -16,9 +16,9 @@ package cmd
import (
"context"
- "syscall"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
@@ -65,7 +65,7 @@ func (r *Run) Execute(_ context.Context, f *flag.FlagSet, args ...interface{}) s
id := f.Arg(0)
conf := args[0].(*config.Config)
- waitStatus := args[1].(*syscall.WaitStatus)
+ waitStatus := args[1].(*unix.WaitStatus)
if conf.Rootless {
return Errorf("Rootless mode not supported with %q", r.Name())
diff --git a/runsc/cmd/wait.go b/runsc/cmd/wait.go
index 5d55422c7..d7a783b88 100644
--- a/runsc/cmd/wait.go
+++ b/runsc/cmd/wait.go
@@ -18,9 +18,9 @@ import (
"context"
"encoding/json"
"os"
- "syscall"
"github.com/google/subcommands"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/runsc/config"
"gvisor.dev/gvisor/runsc/container"
"gvisor.dev/gvisor/runsc/flag"
@@ -77,7 +77,7 @@ func (wt *Wait) Execute(_ context.Context, f *flag.FlagSet, args ...interface{})
Fatalf("loading container: %v", err)
}
- var waitStatus syscall.WaitStatus
+ var waitStatus unix.WaitStatus
switch {
// Wait on the whole container.
case wt.rootPID == unsetPID && wt.pid == unsetPID:
@@ -119,7 +119,7 @@ type waitResult struct {
// exitStatus returns the correct exit status for a process based on if it
// was signaled or exited cleanly.
-func exitStatus(status syscall.WaitStatus) int {
+func exitStatus(status unix.WaitStatus) int {
if status.Signaled() {
return 128 + int(status.Signal())
}
diff --git a/runsc/config/config.go b/runsc/config/config.go
index e9fd7708f..34ef48825 100644
--- a/runsc/config/config.go
+++ b/runsc/config/config.go
@@ -64,6 +64,9 @@ type Config struct {
// Overlay is whether to wrap the root filesystem in an overlay.
Overlay bool `flag:"overlay"`
+ // Verity is whether one or more verity file systems are to be mounted.
+ Verity bool `flag:"verity"`
+
// FSGoferHostUDS enables the gofer to mount a host UDS.
FSGoferHostUDS bool `flag:"fsgofer-host-uds"`
diff --git a/runsc/config/flags.go b/runsc/config/flags.go
index 7e738dfdf..adbee506c 100644
--- a/runsc/config/flags.go
+++ b/runsc/config/flags.go
@@ -69,6 +69,7 @@ func RegisterFlags() {
// Flags that control sandbox runtime behavior: FS related.
flag.Var(fileAccessTypePtr(FileAccessExclusive), "file-access", "specifies which filesystem to use for the root mount: exclusive (default), shared. Volume mounts are always shared.")
flag.Bool("overlay", false, "wrap filesystem mounts with writable overlay. All modifications are stored in memory inside the sandbox.")
+ flag.Bool("verity", false, "specifies whether a verity file system will be mounted.")
flag.Bool("overlayfs-stale-read", true, "assume root mount is an overlay filesystem")
flag.Bool("fsgofer-host-uds", false, "allow the gofer to mount Unix Domain Sockets.")
flag.Bool("vfs2", false, "enables VFSv2. This uses the new VFS layer that is faster than the previous one.")
diff --git a/runsc/container/BUILD b/runsc/container/BUILD
index 8793c8916..3620dc8c3 100644
--- a/runsc/container/BUILD
+++ b/runsc/container/BUILD
@@ -30,6 +30,7 @@ go_library(
"@com_github_cenkalti_backoff//:go_default_library",
"@com_github_gofrs_flock//:go_default_library",
"@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/runsc/container/console_test.go b/runsc/container/console_test.go
index 7a3d5a523..79b056fce 100644
--- a/runsc/container/console_test.go
+++ b/runsc/container/console_test.go
@@ -21,7 +21,6 @@ import (
"math/rand"
"os"
"path/filepath"
- "syscall"
"testing"
"time"
@@ -320,7 +319,7 @@ func TestJobControlSignalExec(t *testing.T) {
// Send a SIGTERM to the foreground process for the exec PID. Note that
// although we pass in the PID of "bash", it should actually terminate
// "sleep", since that is the foreground process.
- if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGTERM, true /* fgProcess */); err != nil {
+ if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
@@ -340,7 +339,7 @@ func TestJobControlSignalExec(t *testing.T) {
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
- if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.SIGKILL, true /* fgProcess */); err != nil {
+ if err := c.Sandbox.SignalProcess(c.ID, pid, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
expectedPL = expectedPL[:1]
@@ -356,7 +355,7 @@ func TestJobControlSignalExec(t *testing.T) {
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
- if got, want := ws.Signal(), syscall.SIGKILL; got != want {
+ if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
@@ -423,7 +422,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// very early, otherwise it might exit before we have a chance to call
// Wait.
var (
- ws syscall.WaitStatus
+ ws unix.WaitStatus
wg sync.WaitGroup
)
wg.Add(1)
@@ -459,7 +458,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// Send a SIGTERM to the foreground process. We pass PID=0, indicating
// that the root process should be killed. However, by setting
// fgProcess=true, the signal should actually be sent to sleep.
- if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, syscall.SIGTERM, true /* fgProcess */); err != nil {
+ if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGTERM, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
@@ -479,7 +478,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
// Send a SIGKILL to the foreground process again. This time "bash"
// should be killed. We use SIGKILL instead of SIGTERM or SIGINT
// because bash ignores those.
- if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, syscall.SIGKILL, true /* fgProcess */); err != nil {
+ if err := c.Sandbox.SignalProcess(c.ID, 0 /* PID */, unix.SIGKILL, true /* fgProcess */); err != nil {
t.Fatalf("error signaling container: %v", err)
}
@@ -488,7 +487,7 @@ func TestJobControlSignalRootContainer(t *testing.T) {
if !ws.Signaled() {
t.Error("ws.Signaled() got false, want true")
}
- if got, want := ws.Signal(), syscall.SIGKILL; got != want {
+ if got, want := ws.Signal(), unix.SIGKILL; got != want {
t.Errorf("ws.Signal() got %v, want %v", got, want)
}
}
diff --git a/runsc/container/container.go b/runsc/container/container.go
index 40812efb8..f9d83c118 100644
--- a/runsc/container/container.go
+++ b/runsc/container/container.go
@@ -30,6 +30,7 @@ import (
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/log"
@@ -244,7 +245,7 @@ func New(conf *config.Config, args Args) (*Container, error) {
// If there is cgroup config, install it before creating sandbox process.
if err := cg.Install(args.Spec.Linux.Resources); err != nil {
switch {
- case errors.Is(err, syscall.EACCES) && conf.Rootless:
+ case errors.Is(err, unix.EACCES) && conf.Rootless:
log.Warningf("Skipping cgroup configuration in rootless mode: %v", err)
cg = nil
default:
@@ -447,7 +448,7 @@ func (c *Container) Restore(spec *specs.Spec, conf *config.Config, restoreFile s
}
// Run is a helper that calls Create + Start + Wait.
-func Run(conf *config.Config, args Args) (syscall.WaitStatus, error) {
+func Run(conf *config.Config, args Args) (unix.WaitStatus, error) {
log.Debugf("Run container, cid: %s, rootDir: %q", args.ID, conf.RootDir)
c, err := New(conf, args)
if err != nil {
@@ -517,7 +518,7 @@ func (c *Container) SandboxPid() int {
// Wait waits for the container to exit, and returns its WaitStatus.
// Call to wait on a stopped container is needed to retrieve the exit status
// and wait returns immediately.
-func (c *Container) Wait() (syscall.WaitStatus, error) {
+func (c *Container) Wait() (unix.WaitStatus, error) {
log.Debugf("Wait on container, cid: %s", c.ID)
ws, err := c.Sandbox.Wait(c.ID)
if err == nil {
@@ -529,7 +530,7 @@ func (c *Container) Wait() (syscall.WaitStatus, error) {
// WaitRootPID waits for process 'pid' in the sandbox's PID namespace and
// returns its WaitStatus.
-func (c *Container) WaitRootPID(pid int32) (syscall.WaitStatus, error) {
+func (c *Container) WaitRootPID(pid int32) (unix.WaitStatus, error) {
log.Debugf("Wait on process %d in sandbox, cid: %s", pid, c.Sandbox.ID)
if !c.IsSandboxRunning() {
return 0, fmt.Errorf("sandbox is not running")
@@ -539,7 +540,7 @@ func (c *Container) WaitRootPID(pid int32) (syscall.WaitStatus, error) {
// WaitPID waits for process 'pid' in the container's PID namespace and returns
// its WaitStatus.
-func (c *Container) WaitPID(pid int32) (syscall.WaitStatus, error) {
+func (c *Container) WaitPID(pid int32) (unix.WaitStatus, error) {
log.Debugf("Wait on process %d in container, cid: %s", pid, c.ID)
if !c.IsSandboxRunning() {
return 0, fmt.Errorf("sandbox is not running")
@@ -551,7 +552,7 @@ func (c *Container) WaitPID(pid int32) (syscall.WaitStatus, error) {
// is SIGKILL, then waits for all processes to exit before returning.
// SignalContainer returns an error if the container is already stopped.
// TODO(b/113680494): Distinguish different error types.
-func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {
+func (c *Container) SignalContainer(sig unix.Signal, all bool) error {
log.Debugf("Signal container, cid: %s, signal: %v (%d)", c.ID, sig, sig)
// Signaling container in Stopped state is allowed. When all=false,
// an error will be returned anyway; when all=true, this allows
@@ -568,7 +569,7 @@ func (c *Container) SignalContainer(sig syscall.Signal, all bool) error {
}
// SignalProcess sends sig to a specific process in the container.
-func (c *Container) SignalProcess(sig syscall.Signal, pid int32) error {
+func (c *Container) SignalProcess(sig unix.Signal, pid int32) error {
log.Debugf("Signal process %d in container, cid: %s, signal: %v (%d)", pid, c.ID, sig, sig)
if err := c.requireStatus("signal a process inside", Running); err != nil {
return err
@@ -586,7 +587,7 @@ func (c *Container) ForwardSignals(pid int32, fgProcess bool) func() {
log.Debugf("Forwarding all signals to container, cid: %s, PIDPID: %d, fgProcess: %t", c.ID, pid, fgProcess)
stop := sighandling.StartSignalForwarding(func(sig linux.Signal) {
log.Debugf("Forwarding signal %d to container, cid: %s, PID: %d, fgProcess: %t", sig, c.ID, pid, fgProcess)
- if err := c.Sandbox.SignalProcess(c.ID, pid, syscall.Signal(sig), fgProcess); err != nil {
+ if err := c.Sandbox.SignalProcess(c.ID, pid, unix.Signal(sig), fgProcess); err != nil {
log.Warningf("error forwarding signal %d to container %q: %v", sig, c.ID, err)
}
})
@@ -768,9 +769,9 @@ func (c *Container) stop() error {
// Try killing gofer if it does not exit with container.
if c.GoferPid != 0 {
log.Debugf("Killing gofer for container, cid: %s, PID: %d", c.ID, c.GoferPid)
- if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
+ if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
// The gofer may already be stopped, log the error.
- log.Warningf("Error sending signal %d to gofer %d: %v", syscall.SIGKILL, c.GoferPid, err)
+ log.Warningf("Error sending signal %d to gofer %d: %v", unix.SIGKILL, c.GoferPid, err)
}
}
@@ -793,7 +794,7 @@ func (c *Container) waitForStopped() error {
b := backoff.WithContext(backoff.NewConstantBackOff(100*time.Millisecond), ctx)
op := func() error {
if c.IsSandboxRunning() {
- if err := c.SignalContainer(syscall.Signal(0), false); err == nil {
+ if err := c.SignalContainer(unix.Signal(0), false); err == nil {
return fmt.Errorf("container is still running")
}
}
@@ -803,7 +804,7 @@ func (c *Container) waitForStopped() error {
if c.goferIsChild {
// The gofer process is a child of the current process,
// so we can wait it and collect its zombie.
- wpid, err := syscall.Wait4(int(c.GoferPid), nil, syscall.WNOHANG, nil)
+ wpid, err := unix.Wait4(int(c.GoferPid), nil, unix.WNOHANG, nil)
if err != nil {
return fmt.Errorf("error waiting the gofer process: %v", err)
}
@@ -811,7 +812,7 @@ func (c *Container) waitForStopped() error {
return fmt.Errorf("gofer is still running")
}
- } else if err := syscall.Kill(c.GoferPid, 0); err == nil {
+ } else if err := unix.Kill(c.GoferPid, 0); err == nil {
return fmt.Errorf("gofer is still running")
}
c.GoferPid = 0
@@ -892,7 +893,7 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bu
sandEnds := make([]*os.File, 0, mountCount)
for i := 0; i < mountCount; i++ {
- fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM|syscall.SOCK_CLOEXEC, 0)
+ fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
if err != nil {
return nil, nil, err
}
@@ -914,8 +915,8 @@ func (c *Container) createGoferProcess(spec *specs.Spec, conf *config.Config, bu
if attached {
// The gofer is attached to the lifetime of this process, so it
// should synchronously die when this process dies.
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Pdeathsig: syscall.SIGKILL,
+ cmd.SysProcAttr = &unix.SysProcAttr{
+ Pdeathsig: unix.SIGKILL,
}
}
@@ -1113,7 +1114,7 @@ func setOOMScoreAdj(pid int, scoreAdj int) error {
}
defer f.Close()
if _, err := f.WriteString(strconv.Itoa(scoreAdj)); err != nil {
- if errors.Is(err, syscall.ESRCH) {
+ if errors.Is(err, unix.ESRCH) {
log.Warningf("Process (%d) exited while setting oom_score_adj", pid)
return nil
}
diff --git a/runsc/container/container_test.go b/runsc/container/container_test.go
index 862d9444d..5a0c468a4 100644
--- a/runsc/container/container_test.go
+++ b/runsc/container/container_test.go
@@ -27,12 +27,12 @@ import (
"reflect"
"strconv"
"strings"
- "syscall"
"testing"
"time"
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
"gvisor.dev/gvisor/pkg/log"
@@ -103,7 +103,7 @@ func waitForProcessCount(cont *Container, want int) error {
func blockUntilWaitable(pid int) error {
_, _, err := specutils.RetryEintr(func() (uintptr, uintptr, error) {
var err error
- _, _, err1 := syscall.Syscall6(syscall.SYS_WAITID, 1, uintptr(pid), 0, syscall.WEXITED|syscall.WNOWAIT, 0, 0)
+ _, _, err1 := unix.Syscall6(unix.SYS_WAITID, 1, uintptr(pid), 0, unix.WEXITED|unix.WNOWAIT, 0, 0)
if err1 != 0 {
err = err1
}
@@ -468,7 +468,7 @@ func TestLifecycle(t *testing.T) {
if err != nil {
ch <- err
}
- if got, want := ws.Signal(), syscall.SIGTERM; got != want {
+ if got, want := ws.Signal(), unix.SIGTERM; got != want {
ch <- fmt.Errorf("got signal %v, want %v", got, want)
}
ch <- nil
@@ -479,8 +479,8 @@ func TestLifecycle(t *testing.T) {
time.Sleep(time.Second)
// Send the container a SIGTERM which will cause it to stop.
- if err := c.SignalContainer(syscall.SIGTERM, false); err != nil {
- t.Fatalf("error sending signal %v to container: %v", syscall.SIGTERM, err)
+ if err := c.SignalContainer(unix.SIGTERM, false); err != nil {
+ t.Fatalf("error sending signal %v to container: %v", unix.SIGTERM, err)
}
// Wait for it to die.
@@ -815,11 +815,11 @@ func TestExec(t *testing.T) {
t.Run("nonexist", func(t *testing.T) {
// b/179114837 found by Syzkaller that causes nil pointer panic when
// trying to dec-ref an unix socket FD.
- fds, err := syscall.Socketpair(syscall.AF_UNIX, syscall.SOCK_STREAM, 0)
+ fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
if err != nil {
t.Fatal(err)
}
- defer syscall.Close(fds[0])
+ defer unix.Close(fds[0])
_, err = cont.executeSync(&control.ExecArgs{
Argv: []string{"/nonexist"},
@@ -956,7 +956,7 @@ func TestKillPid(t *testing.T) {
pid = int32(p.PID)
}
}
- if err := cont.SignalProcess(syscall.SIGKILL, pid); err != nil {
+ if err := cont.SignalProcess(unix.SIGKILL, pid); err != nil {
t.Fatalf("failed to signal process %d: %v", pid, err)
}
@@ -1601,12 +1601,12 @@ func TestReadonlyRoot(t *testing.T) {
}
// Read mounts to check that root is readonly.
- out, err := executeCombinedOutput(c, "/bin/sh", "-c", "mount | grep ' / '")
+ out, err := executeCombinedOutput(c, "/bin/sh", "-c", "mount | grep ' / ' | grep -o -e '(.*)'")
if err != nil {
t.Fatalf("exec failed: %v", err)
}
- t.Logf("root mount: %q", out)
- if !strings.Contains(string(out), "(ro)") {
+ t.Logf("root mount options: %q", out)
+ if !strings.Contains(string(out), "ro") {
t.Errorf("root not mounted readonly: %q", out)
}
@@ -1615,7 +1615,7 @@ func TestReadonlyRoot(t *testing.T) {
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
- if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
+ if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM {
t.Fatalf("wrong waitStatus: %v", ws)
}
})
@@ -1659,13 +1659,13 @@ func TestReadonlyMount(t *testing.T) {
}
// Read mounts to check that volume is readonly.
- cmd := fmt.Sprintf("mount | grep ' %s '", dir)
+ cmd := fmt.Sprintf("mount | grep ' %s ' | grep -o -e '(.*)'", dir)
out, err := executeCombinedOutput(c, "/bin/sh", "-c", cmd)
if err != nil {
t.Fatalf("exec failed, err: %v", err)
}
- t.Logf("mount: %q", out)
- if !strings.Contains(string(out), "(ro)") {
+ t.Logf("mount options: %q", out)
+ if !strings.Contains(string(out), "ro") {
t.Errorf("volume not mounted readonly: %q", out)
}
@@ -1674,7 +1674,7 @@ func TestReadonlyMount(t *testing.T) {
if err != nil {
t.Fatalf("touch file in ro mount: %v", err)
}
- if !ws.Exited() || syscall.Errno(ws.ExitStatus()) != syscall.EPERM {
+ if !ws.Exited() || unix.Errno(ws.ExitStatus()) != unix.EPERM {
t.Fatalf("wrong WaitStatus: %v", ws)
}
})
@@ -1750,8 +1750,8 @@ func TestUIDMap(t *testing.T) {
if !ws.Exited() || ws.ExitStatus() != 0 {
t.Fatalf("container failed, waitStatus: %v", ws)
}
- st := syscall.Stat_t{}
- if err := syscall.Stat(testFile, &st); err != nil {
+ st := unix.Stat_t{}
+ if err := unix.Stat(testFile, &st); err != nil {
t.Fatalf("error stat /testfile: %v", err)
}
@@ -1880,7 +1880,7 @@ func doGoferExitTest(t *testing.T, vfs2 bool) {
}
err = blockUntilWaitable(c.GoferPid)
- if err != nil && err != syscall.ECHILD {
+ if err != nil && err != unix.ECHILD {
t.Errorf("error waiting for gofer to exit: %v", err)
}
}
@@ -1929,7 +1929,7 @@ func TestUserLog(t *testing.T) {
}
// sched_rr_get_interval - not implemented in gvisor.
- num := strconv.Itoa(syscall.SYS_SCHED_RR_GET_INTERVAL)
+ num := strconv.Itoa(unix.SYS_SCHED_RR_GET_INTERVAL)
spec := testutil.NewSpecWithArgs(app, "syscall", "--syscall="+num)
conf := testutil.TestConfig(t)
_, bundleDir, cleanup, err := testutil.SetupContainer(spec, conf)
@@ -2159,10 +2159,10 @@ func TestMountPropagation(t *testing.T) {
f.Close()
// Setup src as a shared mount.
- if err := syscall.Mount(src, src, "bind", syscall.MS_BIND, ""); err != nil {
+ if err := unix.Mount(src, src, "bind", unix.MS_BIND, ""); err != nil {
t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
}
- if err := syscall.Mount("", src, "", syscall.MS_SHARED, ""); err != nil {
+ if err := unix.Mount("", src, "", unix.MS_SHARED, ""); err != nil {
t.Fatalf("mount(%q, MS_SHARED): %v", srcMnt, err)
}
@@ -2209,7 +2209,7 @@ func TestMountPropagation(t *testing.T) {
// After the container is started, mount dir inside source and check what
// happens to both destinations.
- if err := syscall.Mount(dir, srcMnt, "bind", syscall.MS_BIND, ""); err != nil {
+ if err := unix.Mount(dir, srcMnt, "bind", unix.MS_BIND, ""); err != nil {
t.Fatalf("mount(%q, %q, MS_BIND): %v", dir, srcMnt, err)
}
@@ -2449,7 +2449,7 @@ func TestCreateWithCorruptedStateFile(t *testing.T) {
}
}
-func execute(cont *Container, name string, arg ...string) (syscall.WaitStatus, error) {
+func execute(cont *Container, name string, arg ...string) (unix.WaitStatus, error) {
args := &control.ExecArgs{
Filename: name,
Argv: append([]string{name}, arg...),
@@ -2483,7 +2483,7 @@ func executeCombinedOutput(cont *Container, name string, arg ...string) ([]byte,
}
// executeSync synchronously executes a new process.
-func (c *Container) executeSync(args *control.ExecArgs) (syscall.WaitStatus, error) {
+func (c *Container) executeSync(args *control.ExecArgs) (unix.WaitStatus, error) {
pid, err := c.Execute(args)
if err != nil {
return 0, fmt.Errorf("error executing: %v", err)
diff --git a/runsc/container/multi_container_test.go b/runsc/container/multi_container_test.go
index b434cdb23..0f0a223ce 100644
--- a/runsc/container/multi_container_test.go
+++ b/runsc/container/multi_container_test.go
@@ -22,11 +22,11 @@ import (
"path"
"path/filepath"
"strings"
- "syscall"
"testing"
"time"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/sentry/control"
"gvisor.dev/gvisor/pkg/sentry/kernel"
@@ -403,7 +403,7 @@ func TestMultiPIDNSKill(t *testing.T) {
t.Logf("Container %q procs: %s", c.ID, procListToString(procs))
pidToKill := procs[processes-1].PID
t.Logf("PID to kill: %d", pidToKill)
- if err := c.SignalProcess(syscall.SIGKILL, int32(pidToKill)); err != nil {
+ if err := c.SignalProcess(unix.SIGKILL, int32(pidToKill)); err != nil {
t.Errorf("container.SignalProcess: %v", err)
}
// Wait for the process to get killed.
@@ -432,7 +432,7 @@ func TestMultiPIDNSKill(t *testing.T) {
pidToKill = procs[len(procs)-1].PID
t.Logf("PID that should not be killed: %d", pidToKill)
- err = c.SignalProcess(syscall.SIGKILL, int32(pidToKill))
+ err = c.SignalProcess(unix.SIGKILL, int32(pidToKill))
if err == nil {
t.Fatalf("killing another container's process should fail")
}
@@ -640,7 +640,7 @@ func TestMultiContainerSignal(t *testing.T) {
}
// Kill process 2.
- if err := containers[1].SignalContainer(syscall.SIGKILL, false); err != nil {
+ if err := containers[1].SignalContainer(unix.SIGKILL, false); err != nil {
t.Errorf("failed to kill process 2: %v", err)
}
@@ -660,10 +660,10 @@ func TestMultiContainerSignal(t *testing.T) {
t.Errorf("failed to destroy container: %v", err)
}
_, _, err = specutils.RetryEintr(func() (uintptr, uintptr, error) {
- cpid, err := syscall.Wait4(goferPid, nil, 0, nil)
+ cpid, err := unix.Wait4(goferPid, nil, 0, nil)
return uintptr(cpid), 0, err
})
- if err != syscall.ECHILD {
+ if err != unix.ECHILD {
t.Errorf("error waiting for gofer to exit: %v", err)
}
// Make sure process 1 is still running.
@@ -673,28 +673,28 @@ func TestMultiContainerSignal(t *testing.T) {
// Now that process 2 is gone, ensure we get an error trying to
// signal it again.
- if err := containers[1].SignalContainer(syscall.SIGKILL, false); err == nil {
+ if err := containers[1].SignalContainer(unix.SIGKILL, false); err == nil {
t.Errorf("container %q shouldn't exist, but we were able to signal it", containers[1].ID)
}
// Kill process 1.
- if err := containers[0].SignalContainer(syscall.SIGKILL, false); err != nil {
+ if err := containers[0].SignalContainer(unix.SIGKILL, false); err != nil {
t.Errorf("failed to kill process 1: %v", err)
}
// Ensure that container's gofer and sandbox process are no more.
err = blockUntilWaitable(containers[0].GoferPid)
- if err != nil && err != syscall.ECHILD {
+ if err != nil && err != unix.ECHILD {
t.Errorf("error waiting for gofer to exit: %v", err)
}
err = blockUntilWaitable(containers[0].Sandbox.Pid)
- if err != nil && err != syscall.ECHILD {
+ if err != nil && err != unix.ECHILD {
t.Errorf("error waiting for sandbox to exit: %v", err)
}
// The sentry should be gone, so signaling should yield an error.
- if err := containers[0].SignalContainer(syscall.SIGKILL, false); err == nil {
+ if err := containers[0].SignalContainer(unix.SIGKILL, false); err == nil {
t.Errorf("sandbox %q shouldn't exist, but we were able to signal it", containers[0].Sandbox.ID)
}
@@ -893,7 +893,7 @@ func TestMultiContainerKillAll(t *testing.T) {
if tc.killContainer {
// First kill the init process to make the container be stopped with
// processes still running inside.
- containers[1].SignalContainer(syscall.SIGKILL, false)
+ containers[1].SignalContainer(unix.SIGKILL, false)
op := func() error {
c, err := Load(conf.RootDir, FullID{ContainerID: ids[1]}, LoadOpts{})
if err != nil {
@@ -914,7 +914,7 @@ func TestMultiContainerKillAll(t *testing.T) {
t.Fatalf("failed to load child container %q: %v", c.ID, err)
}
// Kill'Em All
- if err := c.SignalContainer(syscall.SIGKILL, true); err != nil {
+ if err := c.SignalContainer(unix.SIGKILL, true); err != nil {
t.Fatalf("failed to send SIGKILL to container %q: %v", c.ID, err)
}
@@ -1640,8 +1640,8 @@ func TestMultiContainerGoferKilled(t *testing.T) {
}
// Kill container's gofer.
- if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
- t.Fatalf("syscall.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
+ if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
+ t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
}
// Wait until container stops.
@@ -1672,8 +1672,8 @@ func TestMultiContainerGoferKilled(t *testing.T) {
// Kill root container's gofer to bring entire sandbox down.
c = containers[0]
- if err := syscall.Kill(c.GoferPid, syscall.SIGKILL); err != nil {
- t.Fatalf("syscall.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
+ if err := unix.Kill(c.GoferPid, unix.SIGKILL); err != nil {
+ t.Fatalf("unix.Kill(%d, SIGKILL)=%v", c.GoferPid, err)
}
// Wait until sandbox stops. waitForProcessList will loop until sandbox exits
diff --git a/runsc/container/state_file.go b/runsc/container/state_file.go
index c46322ba4..0399903a0 100644
--- a/runsc/container/state_file.go
+++ b/runsc/container/state_file.go
@@ -22,9 +22,9 @@ import (
"path/filepath"
"regexp"
"strings"
- "syscall"
"github.com/gofrs/flock"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/sync"
)
@@ -89,7 +89,7 @@ func Load(rootDir string, id FullID, opts LoadOpts) (*Container, error) {
c.changeStatus(Stopped)
}
case Running:
- if err := c.SignalContainer(syscall.Signal(0), false); err != nil {
+ if err := c.SignalContainer(unix.Signal(0), false); err != nil {
c.changeStatus(Stopped)
}
}
@@ -245,7 +245,7 @@ type StateFile struct {
// lock globally locks all locking operations for the container.
func (s *StateFile) lock() error {
s.once.Do(func() {
- s.flock = flock.NewFlock(s.lockPath())
+ s.flock = flock.New(s.lockPath())
})
if err := s.flock.Lock(); err != nil {
diff --git a/runsc/fsgofer/filter/config.go b/runsc/fsgofer/filter/config.go
index d1af539cb..fd72414ce 100644
--- a/runsc/fsgofer/filter/config.go
+++ b/runsc/fsgofer/filter/config.go
@@ -16,7 +16,6 @@ package filter
import (
"os"
- "syscall"
"golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
@@ -25,12 +24,12 @@ import (
// allowedSyscalls is the set of syscalls executed by the gofer.
var allowedSyscalls = seccomp.SyscallRules{
- syscall.SYS_ACCEPT: {},
- syscall.SYS_CLOCK_GETTIME: {},
- syscall.SYS_CLOSE: {},
- syscall.SYS_DUP: {},
- syscall.SYS_EPOLL_CTL: {},
- syscall.SYS_EPOLL_PWAIT: []seccomp.Rule{
+ unix.SYS_ACCEPT: {},
+ unix.SYS_CLOCK_GETTIME: {},
+ unix.SYS_CLOSE: {},
+ unix.SYS_DUP: {},
+ unix.SYS_EPOLL_CTL: {},
+ unix.SYS_EPOLL_PWAIT: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
@@ -39,34 +38,34 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(0),
},
},
- syscall.SYS_EVENTFD2: []seccomp.Rule{
+ unix.SYS_EVENTFD2: []seccomp.Rule{
{
seccomp.EqualTo(0),
seccomp.EqualTo(0),
},
},
- syscall.SYS_EXIT: {},
- syscall.SYS_EXIT_GROUP: {},
- syscall.SYS_FALLOCATE: []seccomp.Rule{
+ unix.SYS_EXIT: {},
+ unix.SYS_EXIT_GROUP: {},
+ unix.SYS_FALLOCATE: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.EqualTo(0),
},
},
- syscall.SYS_FCHMOD: {},
- syscall.SYS_FCHOWNAT: {},
- syscall.SYS_FCNTL: []seccomp.Rule{
+ unix.SYS_FCHMOD: {},
+ unix.SYS_FCHOWNAT: {},
+ unix.SYS_FCNTL: []seccomp.Rule{
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_GETFL),
+ seccomp.EqualTo(unix.F_GETFL),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_SETFL),
+ seccomp.EqualTo(unix.F_SETFL),
},
{
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.F_GETFD),
+ seccomp.EqualTo(unix.F_GETFD),
},
// Used by flipcall.PacketWindowAllocator.Init().
{
@@ -74,11 +73,11 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(unix.F_ADD_SEALS),
},
},
- syscall.SYS_FSTAT: {},
- syscall.SYS_FSTATFS: {},
- syscall.SYS_FSYNC: {},
- syscall.SYS_FTRUNCATE: {},
- syscall.SYS_FUTEX: {
+ unix.SYS_FSTAT: {},
+ unix.SYS_FSTATFS: {},
+ unix.SYS_FSYNC: {},
+ unix.SYS_FTRUNCATE: {},
+ unix.SYS_FUTEX: {
seccomp.Rule{
seccomp.MatchAny{},
seccomp.EqualTo(linux.FUTEX_WAIT | linux.FUTEX_PRIVATE_FLAG),
@@ -116,78 +115,78 @@ var allowedSyscalls = seccomp.SyscallRules{
seccomp.EqualTo(0),
},
},
- syscall.SYS_GETDENTS64: {},
- syscall.SYS_GETPID: {},
- unix.SYS_GETRANDOM: {},
- syscall.SYS_GETTID: {},
- syscall.SYS_GETTIMEOFDAY: {},
- syscall.SYS_LINKAT: {},
- syscall.SYS_LSEEK: {},
- syscall.SYS_MADVISE: {},
- unix.SYS_MEMFD_CREATE: {}, /// Used by flipcall.PacketWindowAllocator.Init().
- syscall.SYS_MKDIRAT: {},
- syscall.SYS_MKNODAT: {},
+ unix.SYS_GETDENTS64: {},
+ unix.SYS_GETPID: {},
+ unix.SYS_GETRANDOM: {},
+ unix.SYS_GETTID: {},
+ unix.SYS_GETTIMEOFDAY: {},
+ unix.SYS_LINKAT: {},
+ unix.SYS_LSEEK: {},
+ unix.SYS_MADVISE: {},
+ unix.SYS_MEMFD_CREATE: {}, // Used by flipcall.PacketWindowAllocator.Init().
+ unix.SYS_MKDIRAT: {},
+ unix.SYS_MKNODAT: {},
// Used by the Go runtime as a temporarily workaround for a Linux
// 5.2-5.4 bug.
//
// See src/runtime/os_linux_x86.go.
//
// TODO(b/148688965): Remove once this is gone from Go.
- syscall.SYS_MLOCK: []seccomp.Rule{
+ unix.SYS_MLOCK: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.EqualTo(4096),
},
},
- syscall.SYS_MMAP: []seccomp.Rule{
+ unix.SYS_MMAP: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_SHARED),
+ seccomp.EqualTo(unix.MAP_SHARED),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MAP_PRIVATE | syscall.MAP_ANONYMOUS | syscall.MAP_FIXED),
+ seccomp.EqualTo(unix.MAP_PRIVATE | unix.MAP_ANONYMOUS | unix.MAP_FIXED),
},
},
- syscall.SYS_MPROTECT: {},
- syscall.SYS_MUNMAP: {},
- syscall.SYS_NANOSLEEP: {},
- syscall.SYS_OPENAT: {},
- syscall.SYS_PPOLL: {},
- syscall.SYS_PREAD64: {},
- syscall.SYS_PWRITE64: {},
- syscall.SYS_READ: {},
- syscall.SYS_READLINKAT: {},
- syscall.SYS_RECVMSG: []seccomp.Rule{
+ unix.SYS_MPROTECT: {},
+ unix.SYS_MUNMAP: {},
+ unix.SYS_NANOSLEEP: {},
+ unix.SYS_OPENAT: {},
+ unix.SYS_PPOLL: {},
+ unix.SYS_PREAD64: {},
+ unix.SYS_PWRITE64: {},
+ unix.SYS_READ: {},
+ unix.SYS_READLINKAT: {},
+ unix.SYS_RECVMSG: []seccomp.Rule{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_TRUNC),
},
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_TRUNC | syscall.MSG_PEEK),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_TRUNC | unix.MSG_PEEK),
},
},
- syscall.SYS_RENAMEAT: {},
- syscall.SYS_RESTART_SYSCALL: {},
+ unix.SYS_RENAMEAT: {},
+ unix.SYS_RESTART_SYSCALL: {},
// May be used by the runtime during panic().
- syscall.SYS_RT_SIGACTION: {},
- syscall.SYS_RT_SIGPROCMASK: {},
- syscall.SYS_RT_SIGRETURN: {},
- syscall.SYS_SCHED_YIELD: {},
- syscall.SYS_SENDMSG: []seccomp.Rule{
+ unix.SYS_RT_SIGACTION: {},
+ unix.SYS_RT_SIGPROCMASK: {},
+ unix.SYS_RT_SIGRETURN: {},
+ unix.SYS_SCHED_YIELD: {},
+ unix.SYS_SENDMSG: []seccomp.Rule{
// Used by fdchannel.Endpoint.SendFD().
{
seccomp.MatchAny{},
@@ -198,51 +197,51 @@ var allowedSyscalls = seccomp.SyscallRules{
{
seccomp.MatchAny{},
seccomp.MatchAny{},
- seccomp.EqualTo(syscall.MSG_DONTWAIT | syscall.MSG_NOSIGNAL),
+ seccomp.EqualTo(unix.MSG_DONTWAIT | unix.MSG_NOSIGNAL),
},
},
- syscall.SYS_SHUTDOWN: []seccomp.Rule{
- {seccomp.MatchAny{}, seccomp.EqualTo(syscall.SHUT_RDWR)},
+ unix.SYS_SHUTDOWN: []seccomp.Rule{
+ {seccomp.MatchAny{}, seccomp.EqualTo(unix.SHUT_RDWR)},
},
- syscall.SYS_SIGALTSTACK: {},
+ unix.SYS_SIGALTSTACK: {},
// Used by fdchannel.NewConnectedSockets().
- syscall.SYS_SOCKETPAIR: {
+ unix.SYS_SOCKETPAIR: {
{
- seccomp.EqualTo(syscall.AF_UNIX),
- seccomp.EqualTo(syscall.SOCK_SEQPACKET | syscall.SOCK_CLOEXEC),
+ seccomp.EqualTo(unix.AF_UNIX),
+ seccomp.EqualTo(unix.SOCK_SEQPACKET | unix.SOCK_CLOEXEC),
seccomp.EqualTo(0),
},
},
- syscall.SYS_SYMLINKAT: {},
- syscall.SYS_TGKILL: []seccomp.Rule{
+ unix.SYS_SYMLINKAT: {},
+ unix.SYS_TGKILL: []seccomp.Rule{
{
seccomp.EqualTo(uint64(os.Getpid())),
},
},
- syscall.SYS_UNLINKAT: {},
- syscall.SYS_UTIMENSAT: {},
- syscall.SYS_WRITE: {},
+ unix.SYS_UNLINKAT: {},
+ unix.SYS_UTIMENSAT: {},
+ unix.SYS_WRITE: {},
}
var udsSyscalls = seccomp.SyscallRules{
- syscall.SYS_SOCKET: []seccomp.Rule{
+ unix.SYS_SOCKET: []seccomp.Rule{
{
- seccomp.EqualTo(syscall.AF_UNIX),
- seccomp.EqualTo(syscall.SOCK_STREAM),
+ seccomp.EqualTo(unix.AF_UNIX),
+ seccomp.EqualTo(unix.SOCK_STREAM),
seccomp.EqualTo(0),
},
{
- seccomp.EqualTo(syscall.AF_UNIX),
- seccomp.EqualTo(syscall.SOCK_DGRAM),
+ seccomp.EqualTo(unix.AF_UNIX),
+ seccomp.EqualTo(unix.SOCK_DGRAM),
seccomp.EqualTo(0),
},
{
- seccomp.EqualTo(syscall.AF_UNIX),
- seccomp.EqualTo(syscall.SOCK_SEQPACKET),
+ seccomp.EqualTo(unix.AF_UNIX),
+ seccomp.EqualTo(unix.SOCK_SEQPACKET),
seccomp.EqualTo(0),
},
},
- syscall.SYS_CONNECT: []seccomp.Rule{
+ unix.SYS_CONNECT: []seccomp.Rule{
{
seccomp.MatchAny{},
},
diff --git a/runsc/fsgofer/filter/config_amd64.go b/runsc/fsgofer/filter/config_amd64.go
index 686753d96..2d0151dcc 100644
--- a/runsc/fsgofer/filter/config_amd64.go
+++ b/runsc/fsgofer/filter/config_amd64.go
@@ -17,30 +17,29 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/seccomp"
)
func init() {
- allowedSyscalls[syscall.SYS_ARCH_PRCTL] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_ARCH_PRCTL] = []seccomp.Rule{
// TODO(b/168828518): No longer used in Go 1.16+.
{seccomp.EqualTo(linux.ARCH_SET_FS)},
}
- allowedSyscalls[syscall.SYS_CLONE] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_CLONE] = []seccomp.Rule{
// parent_tidptr and child_tidptr are always 0 because neither
// CLONE_PARENT_SETTID nor CLONE_CHILD_SETTID are used.
{
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SETTLS |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SETTLS |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
seccomp.EqualTo(0), // parent_tidptr
seccomp.EqualTo(0), // child_tidptr
@@ -49,12 +48,12 @@ func init() {
{
// TODO(b/168828518): No longer used in Go 1.16+ (on amd64).
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
seccomp.EqualTo(0), // parent_tidptr
seccomp.EqualTo(0), // child_tidptr
@@ -62,5 +61,5 @@ func init() {
},
}
- allowedSyscalls[syscall.SYS_NEWFSTATAT] = []seccomp.Rule{}
+ allowedSyscalls[unix.SYS_NEWFSTATAT] = []seccomp.Rule{}
}
diff --git a/runsc/fsgofer/filter/config_arm64.go b/runsc/fsgofer/filter/config_arm64.go
index ff0cf77a0..7d458c02d 100644
--- a/runsc/fsgofer/filter/config_arm64.go
+++ b/runsc/fsgofer/filter/config_arm64.go
@@ -17,23 +17,22 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/seccomp"
)
func init() {
- allowedSyscalls[syscall.SYS_CLONE] = []seccomp.Rule{
+ allowedSyscalls[unix.SYS_CLONE] = []seccomp.Rule{
// parent_tidptr and child_tidptr are always 0 because neither
// CLONE_PARENT_SETTID nor CLONE_CHILD_SETTID are used.
{
seccomp.EqualTo(
- syscall.CLONE_VM |
- syscall.CLONE_FS |
- syscall.CLONE_FILES |
- syscall.CLONE_SIGHAND |
- syscall.CLONE_SYSVSEM |
- syscall.CLONE_THREAD),
+ unix.CLONE_VM |
+ unix.CLONE_FS |
+ unix.CLONE_FILES |
+ unix.CLONE_SIGHAND |
+ unix.CLONE_SYSVSEM |
+ unix.CLONE_THREAD),
seccomp.MatchAny{}, // newsp
// These arguments are left uninitialized by the Go
// runtime, so they may be anything (and are unused by
@@ -44,5 +43,5 @@ func init() {
},
}
- allowedSyscalls[syscall.SYS_FSTATAT] = []seccomp.Rule{}
+ allowedSyscalls[unix.SYS_FSTATAT] = []seccomp.Rule{}
}
diff --git a/runsc/fsgofer/filter/extra_filters_msan.go b/runsc/fsgofer/filter/extra_filters_msan.go
index 8c6179c8f..d768ed0bb 100644
--- a/runsc/fsgofer/filter/extra_filters_msan.go
+++ b/runsc/fsgofer/filter/extra_filters_msan.go
@@ -17,8 +17,7 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/seccomp"
)
@@ -27,7 +26,7 @@ import (
func instrumentationFilters() seccomp.SyscallRules {
log.Warningf("*** SECCOMP WARNING: MSAN is enabled: syscall filters less restrictive!")
return seccomp.SyscallRules{
- syscall.SYS_SCHED_GETAFFINITY: {},
- syscall.SYS_SET_ROBUST_LIST: {},
+ unix.SYS_SCHED_GETAFFINITY: {},
+ unix.SYS_SET_ROBUST_LIST: {},
}
}
diff --git a/runsc/fsgofer/filter/extra_filters_race.go b/runsc/fsgofer/filter/extra_filters_race.go
index cbd5c487e..9e75c025d 100644
--- a/runsc/fsgofer/filter/extra_filters_race.go
+++ b/runsc/fsgofer/filter/extra_filters_race.go
@@ -17,8 +17,7 @@
package filter
import (
- "syscall"
-
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/seccomp"
)
@@ -27,18 +26,18 @@ import (
func instrumentationFilters() seccomp.SyscallRules {
log.Warningf("*** SECCOMP WARNING: TSAN is enabled: syscall filters less restrictive!")
return seccomp.SyscallRules{
- syscall.SYS_BRK: {},
- syscall.SYS_CLOCK_NANOSLEEP: {},
- syscall.SYS_CLONE: {},
- syscall.SYS_FUTEX: {},
- syscall.SYS_MADVISE: {},
- syscall.SYS_MMAP: {},
- syscall.SYS_MUNLOCK: {},
- syscall.SYS_NANOSLEEP: {},
- syscall.SYS_OPEN: {},
- syscall.SYS_OPENAT: {},
- syscall.SYS_SET_ROBUST_LIST: {},
+ unix.SYS_BRK: {},
+ unix.SYS_CLOCK_NANOSLEEP: {},
+ unix.SYS_CLONE: {},
+ unix.SYS_FUTEX: {},
+ unix.SYS_MADVISE: {},
+ unix.SYS_MMAP: {},
+ unix.SYS_MUNLOCK: {},
+ unix.SYS_NANOSLEEP: {},
+ unix.SYS_OPEN: {},
+ unix.SYS_OPENAT: {},
+ unix.SYS_SET_ROBUST_LIST: {},
// Used within glibc's malloc.
- syscall.SYS_TIME: {},
+ unix.SYS_TIME: {},
}
}
diff --git a/runsc/fsgofer/fsgofer.go b/runsc/fsgofer/fsgofer.go
index cfa3796b1..1e80a634d 100644
--- a/runsc/fsgofer/fsgofer.go
+++ b/runsc/fsgofer/fsgofer.go
@@ -66,6 +66,9 @@ type Config struct {
// HostUDS signals whether the gofer can mount a host's UDS.
HostUDS bool
+
+ // EnableXattr allows Get/SetXattr for the mounted file systems.
+ EnableXattr bool
}
type attachPoint struct {
@@ -795,12 +798,22 @@ func (l *localFile) SetAttr(valid p9.SetAttrMask, attr p9.SetAttr) error {
return err
}
-func (*localFile) GetXattr(string, uint64) (string, error) {
- return "", unix.EOPNOTSUPP
+func (l *localFile) GetXattr(name string, size uint64) (string, error) {
+ if !l.attachPoint.conf.EnableXattr {
+ return "", unix.EOPNOTSUPP
+ }
+ buffer := make([]byte, size)
+ n, err := unix.Fgetxattr(l.file.FD(), name, buffer)
+ if err != nil {
+ return "", err
+ }
+ // Only return the bytes actually read; the attribute may be shorter than size.
+ return string(buffer[:n]), nil
}
-func (*localFile) SetXattr(string, string, uint32) error {
- return unix.EOPNOTSUPP
+func (l *localFile) SetXattr(name string, value string, flags uint32) error {
+ if !l.attachPoint.conf.EnableXattr {
+ return unix.EOPNOTSUPP
+ }
+ return unix.Fsetxattr(l.file.FD(), name, []byte(value), int(flags))
}
func (*localFile) ListXattr(uint64) (map[string]struct{}, error) {
diff --git a/runsc/fsgofer/fsgofer_test.go b/runsc/fsgofer/fsgofer_test.go
index 99ea9bd32..a5f09f88f 100644
--- a/runsc/fsgofer/fsgofer_test.go
+++ b/runsc/fsgofer/fsgofer_test.go
@@ -565,6 +565,38 @@ func TestSetAttrOwner(t *testing.T) {
})
}
+func SetGetXattr(l *localFile, name string, value string) error {
+ if err := l.SetXattr(name, value, 0 /* flags */); err != nil {
+ return err
+ }
+ ret, err := l.GetXattr(name, uint64(len(value)))
+ if err != nil {
+ return err
+ }
+ if ret != value {
+ return fmt.Errorf("Got value %s, want %s", ret, value)
+ }
+ return nil
+}
+
+func TestSetGetXattr(t *testing.T) {
+ xattrConfs := []Config{{ROMount: false, EnableXattr: false}, {ROMount: false, EnableXattr: true}}
+ runCustom(t, []uint32{unix.S_IFREG}, xattrConfs, func(t *testing.T, s state) {
+ name := "user.test"
+ value := "tmp"
+ err := SetGetXattr(s.file, name, value)
+ if s.conf.EnableXattr {
+ if err != nil {
+ t.Fatalf("%v: SetGetXattr failed, err: %v", s, err)
+ }
+ } else {
+ if err == nil {
+ t.Fatalf("%v: SetGetXattr should have failed", s)
+ }
+ }
+ })
+}
+
func TestLink(t *testing.T) {
if !specutils.HasCapabilities(capability.CAP_DAC_READ_SEARCH) {
t.Skipf("Link test requires CAP_DAC_READ_SEARCH, running as %d", os.Getuid())
diff --git a/runsc/mitigate/BUILD b/runsc/mitigate/BUILD
index 561854e66..1238890fc 100644
--- a/runsc/mitigate/BUILD
+++ b/runsc/mitigate/BUILD
@@ -4,28 +4,20 @@ package(licenses = ["notice"])
go_library(
name = "mitigate",
- srcs = [
- "cpu.go",
- "mitigate.go",
- "mitigate_conf.go",
- ],
+ srcs = ["mitigate.go"],
visibility = [
"//runsc:__subpackages__",
],
- deps = [
- "//pkg/log",
- "//runsc/flag",
- "@in_gopkg_yaml_v2//:go_default_library",
- ],
+ deps = ["@in_gopkg_yaml_v2//:go_default_library"],
)
go_test(
name = "mitigate_test",
size = "small",
- srcs = [
- "cpu_test.go",
- "mitigate_test.go",
- ],
+ srcs = ["mitigate_test.go"],
library = ":mitigate",
- deps = ["@com_github_google_go_cmp//cmp:go_default_library"],
+ deps = [
+ "//runsc/mitigate/mock",
+ "@com_github_google_go_cmp//cmp:go_default_library",
+ ],
)
diff --git a/runsc/mitigate/cpu.go b/runsc/mitigate/cpu.go
deleted file mode 100644
index 4b2aa351f..000000000
--- a/runsc/mitigate/cpu.go
+++ /dev/null
@@ -1,423 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mitigate
-
-import (
- "fmt"
- "io/ioutil"
- "regexp"
- "strconv"
- "strings"
-)
-
-const (
- // mds is the only bug we care about.
- mds = "mds"
-
- // Constants for parsing /proc/cpuinfo.
- processorKey = "processor"
- vendorIDKey = "vendor_id"
- cpuFamilyKey = "cpu family"
- modelKey = "model"
- physicalIDKey = "physical id"
- coreIDKey = "core id"
- bugsKey = "bugs"
-
- // Path to shutdown a CPU.
- cpuOnlineTemplate = "/sys/devices/system/cpu/cpu%d/online"
-)
-
-// cpuSet contains a map of all CPUs on the system, mapped
-// by Physical ID and CoreIDs. threads with the same
-// Core and Physical ID are Hyperthread pairs.
-type cpuSet map[cpuID]*threadGroup
-
-// newCPUSet creates a CPUSet from data read from /proc/cpuinfo.
-func newCPUSet(data []byte, vulnerable func(thread) bool) (cpuSet, error) {
- processors, err := getThreads(string(data))
- if err != nil {
- return nil, err
- }
-
- set := make(cpuSet)
- for _, p := range processors {
- // Each ID is of the form physicalID:coreID. Hyperthread pairs
- // have identical physical and core IDs. We need to match
- // Hyperthread pairs so that we can shutdown all but one per
- // pair.
- core, ok := set[p.id]
- if !ok {
- core = &threadGroup{}
- set[p.id] = core
- }
- core.isVulnerable = core.isVulnerable || vulnerable(p)
- core.threads = append(core.threads, p)
- }
- return set, nil
-}
-
-// newCPUSetFromPossible makes a cpuSet data read from
-// /sys/devices/system/cpu/possible. This is used in enable operations
-// where the caller simply wants to enable all CPUS.
-func newCPUSetFromPossible(data []byte) (cpuSet, error) {
- threads, err := getThreadsFromPossible(data)
- if err != nil {
- return nil, err
- }
-
- // We don't care if a CPU is vulnerable or not, we just
- // want to return a list of all CPUs on the host.
- set := cpuSet{
- threads[0].id: &threadGroup{
- threads: threads,
- isVulnerable: false,
- },
- }
- return set, nil
-}
-
-// String implements the String method for CPUSet.
-func (c cpuSet) String() string {
- ret := ""
- for _, tg := range c {
- ret += fmt.Sprintf("%s\n", tg)
- }
- return ret
-}
-
-// getRemainingList returns the list of threads that will remain active
-// after mitigation.
-func (c cpuSet) getRemainingList() []thread {
- threads := make([]thread, 0, len(c))
- for _, core := range c {
- // If we're vulnerable, take only one thread from the pair.
- if core.isVulnerable {
- threads = append(threads, core.threads[0])
- continue
- }
- // Otherwise don't shutdown anything.
- threads = append(threads, core.threads...)
- }
- return threads
-}
-
-// getShutdownList returns the list of threads that will be shutdown on
-// mitigation.
-func (c cpuSet) getShutdownList() []thread {
- threads := make([]thread, 0)
- for _, core := range c {
- // Only if we're vulnerable do shutdown anything. In this case,
- // shutdown all but the first entry.
- if core.isVulnerable && len(core.threads) > 1 {
- threads = append(threads, core.threads[1:]...)
- }
- }
- return threads
-}
-
-// threadGroup represents Hyperthread pairs on the same physical/core ID.
-type threadGroup struct {
- threads []thread
- isVulnerable bool
-}
-
-// String implements the String method for threadGroup.
-func (c threadGroup) String() string {
- ret := fmt.Sprintf("ThreadGroup:\nIsVulnerable: %t\n", c.isVulnerable)
- for _, processor := range c.threads {
- ret += fmt.Sprintf("%s\n", processor)
- }
- return ret
-}
-
-// getThreads returns threads structs from reading /proc/cpuinfo.
-func getThreads(data string) ([]thread, error) {
- // Each processor entry should start with the
- // processor key. Find the beginings of each.
- r := buildRegex(processorKey, `\d+`)
- indices := r.FindAllStringIndex(data, -1)
- if len(indices) < 1 {
- return nil, fmt.Errorf("no cpus found for: %q", data)
- }
-
- // Add the ending index for last entry.
- indices = append(indices, []int{len(data), -1})
-
- // Valid cpus are now defined by strings in between
- // indexes (e.g. data[index[i], index[i+1]]).
- // There should be len(indicies) - 1 CPUs
- // since the last index is the end of the string.
- cpus := make([]thread, 0, len(indices))
- // Find each string that represents a CPU. These begin "processor".
- for i := 1; i < len(indices); i++ {
- start := indices[i-1][0]
- end := indices[i][0]
- // Parse the CPU entry, which should be between start/end.
- c, err := newThread(data[start:end])
- if err != nil {
- return nil, err
- }
- cpus = append(cpus, c)
- }
- return cpus, nil
-}
-
-// getThreadsFromPossible makes threads from data read from /sys/devices/system/cpu/possible.
-func getThreadsFromPossible(data []byte) ([]thread, error) {
- possibleRegex := regexp.MustCompile(`(?m)^(\d+)(-(\d+))?$`)
- matches := possibleRegex.FindStringSubmatch(string(data))
- if len(matches) != 4 {
- return nil, fmt.Errorf("mismatch regex from %s: %q", allPossibleCPUs, string(data))
- }
-
- // If matches[3] is empty, we only have one cpu entry.
- if matches[3] == "" {
- matches[3] = matches[1]
- }
-
- begin, err := strconv.ParseInt(matches[1], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("failed to parse begin: %v", err)
- }
- end, err := strconv.ParseInt(matches[3], 10, 64)
- if err != nil {
- return nil, fmt.Errorf("failed to parse end: %v", err)
- }
- if begin > end || begin < 0 || end < 0 {
- return nil, fmt.Errorf("invalid cpu bounds from possible: begin: %d end: %d", begin, end)
- }
-
- ret := make([]thread, 0, end-begin)
- for i := begin; i <= end; i++ {
- ret = append(ret, thread{
- processorNumber: i,
- id: cpuID{
- physicalID: 0, // we don't care about id for enable ops.
- coreID: 0,
- },
- })
- }
-
- return ret, nil
-}
-
-// cpuID for each thread is defined by the physical and
-// core IDs. If equal, two threads are Hyperthread pairs.
-type cpuID struct {
- physicalID int64
- coreID int64
-}
-
-// type cpu represents pertinent info about a cpu.
-type thread struct {
- processorNumber int64 // the processor number of this CPU.
- vendorID string // the vendorID of CPU (e.g. AuthenticAMD).
- cpuFamily int64 // CPU family number (e.g. 6 for CascadeLake/Skylake).
- model int64 // CPU model number (e.g. 85 for CascadeLake/Skylake).
- id cpuID // id for this thread
- bugs map[string]struct{} // map of vulnerabilities parsed from the 'bugs' field.
-}
-
-// newThread parses a CPU from a single cpu entry from /proc/cpuinfo.
-func newThread(data string) (thread, error) {
- empty := thread{}
- processor, err := parseProcessor(data)
- if err != nil {
- return empty, err
- }
-
- vendorID, err := parseVendorID(data)
- if err != nil {
- return empty, err
- }
-
- cpuFamily, err := parseCPUFamily(data)
- if err != nil {
- return empty, err
- }
-
- model, err := parseModel(data)
- if err != nil {
- return empty, err
- }
-
- physicalID, err := parsePhysicalID(data)
- if err != nil {
- return empty, err
- }
-
- coreID, err := parseCoreID(data)
- if err != nil {
- return empty, err
- }
-
- bugs, err := parseBugs(data)
- if err != nil {
- return empty, err
- }
-
- return thread{
- processorNumber: processor,
- vendorID: vendorID,
- cpuFamily: cpuFamily,
- model: model,
- id: cpuID{
- physicalID: physicalID,
- coreID: coreID,
- },
- bugs: bugs,
- }, nil
-}
-
-// String implements the String method for thread.
-func (t thread) String() string {
- template := `CPU: %d
-CPU ID: %+v
-Vendor: %s
-Family/Model: %d/%d
-Bugs: %s
-`
- bugs := make([]string, 0)
- for bug := range t.bugs {
- bugs = append(bugs, bug)
- }
-
- return fmt.Sprintf(template, t.processorNumber, t.id, t.vendorID, t.cpuFamily, t.model, strings.Join(bugs, ","))
-}
-
-// enable turns on the CPU by writing 1 to /sys/devices/cpu/cpu{N}/online.
-func (t thread) enable() error {
- cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
- return ioutil.WriteFile(cpuPath, []byte{'1'}, 0644)
-}
-
-// disable turns off the CPU by writing 0 to /sys/devices/cpu/cpu{N}/online.
-func (t thread) disable() error {
- cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
- return ioutil.WriteFile(cpuPath, []byte{'0'}, 0644)
-}
-
-// isVulnerable checks if a CPU is vulnerable to mds.
-func (t thread) isVulnerable() bool {
- _, ok := t.bugs[mds]
- return ok
-}
-
-// isActive checks if a CPU is active from /sys/devices/system/cpu/cpu{N}/online
-// If the file does not exist (ioutil returns in error), we assume the CPU is on.
-func (t thread) isActive() bool {
- cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
- data, err := ioutil.ReadFile(cpuPath)
- if err != nil {
- return true
- }
- return len(data) > 0 && data[0] != '0'
-}
-
-// similarTo checks family/model/bugs fields for equality of two
-// processors.
-func (t thread) similarTo(other thread) bool {
- if t.vendorID != other.vendorID {
- return false
- }
-
- if other.cpuFamily != t.cpuFamily {
- return false
- }
-
- if other.model != t.model {
- return false
- }
-
- if len(other.bugs) != len(t.bugs) {
- return false
- }
-
- for bug := range t.bugs {
- if _, ok := other.bugs[bug]; !ok {
- return false
- }
- }
- return true
-}
-
-// parseProcessor grabs the processor field from /proc/cpuinfo output.
-func parseProcessor(data string) (int64, error) {
- return parseIntegerResult(data, processorKey)
-}
-
-// parseVendorID grabs the vendor_id field from /proc/cpuinfo output.
-func parseVendorID(data string) (string, error) {
- return parseRegex(data, vendorIDKey, `[\w\d]+`)
-}
-
-// parseCPUFamily grabs the cpu family field from /proc/cpuinfo output.
-func parseCPUFamily(data string) (int64, error) {
- return parseIntegerResult(data, cpuFamilyKey)
-}
-
-// parseModel grabs the model field from /proc/cpuinfo output.
-func parseModel(data string) (int64, error) {
- return parseIntegerResult(data, modelKey)
-}
-
-// parsePhysicalID parses the physical id field.
-func parsePhysicalID(data string) (int64, error) {
- return parseIntegerResult(data, physicalIDKey)
-}
-
-// parseCoreID parses the core id field.
-func parseCoreID(data string) (int64, error) {
- return parseIntegerResult(data, coreIDKey)
-}
-
-// parseBugs grabs the bugs field from /proc/cpuinfo output.
-func parseBugs(data string) (map[string]struct{}, error) {
- result, err := parseRegex(data, bugsKey, `[\d\w\s]*`)
- if err != nil {
- return nil, err
- }
- bugs := strings.Split(result, " ")
- ret := make(map[string]struct{}, len(bugs))
- for _, bug := range bugs {
- ret[bug] = struct{}{}
- }
- return ret, nil
-}
-
-// parseIntegerResult parses fields expecting an integer.
-func parseIntegerResult(data, key string) (int64, error) {
- result, err := parseRegex(data, key, `\d+`)
- if err != nil {
- return 0, err
- }
- return strconv.ParseInt(result, 0, 64)
-}
-
-// buildRegex builds a regex for parsing each CPU field.
-func buildRegex(key, match string) *regexp.Regexp {
- reg := fmt.Sprintf(`(?m)^%s\s*:\s*(.*)$`, key)
- return regexp.MustCompile(reg)
-}
-
-// parseRegex parses data with key inserted into a standard regex template.
-func parseRegex(data, key, match string) (string, error) {
- r := buildRegex(key, match)
- matches := r.FindStringSubmatch(data)
- if len(matches) < 2 {
- return "", fmt.Errorf("failed to match key %q: %q", key, data)
- }
- return matches[1], nil
-}
diff --git a/runsc/mitigate/cpu_test.go b/runsc/mitigate/cpu_test.go
deleted file mode 100644
index 374333465..000000000
--- a/runsc/mitigate/cpu_test.go
+++ /dev/null
@@ -1,605 +0,0 @@
-// Copyright 2021 The gVisor Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package mitigate
-
-import (
- "fmt"
- "io/ioutil"
- "strings"
- "testing"
-)
-
-// mockCPU represents data from CPUs that will be mitigated.
-type mockCPU struct {
- name string
- vendorID string
- family int
- model int
- modelName string
- bugs string
- physicalCores int
- cores int
- threadsPerCore int
-}
-
-var cascadeLake4 = mockCPU{
- name: "CascadeLake",
- vendorID: "GenuineIntel",
- family: 6,
- model: 85,
- modelName: "Intel(R) Xeon(R) CPU",
- bugs: "spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa",
- physicalCores: 1,
- cores: 2,
- threadsPerCore: 2,
-}
-
-var haswell2 = mockCPU{
- name: "Haswell",
- vendorID: "GenuineIntel",
- family: 6,
- model: 63,
- modelName: "Intel(R) Xeon(R) CPU",
- bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
- physicalCores: 1,
- cores: 1,
- threadsPerCore: 2,
-}
-
-var haswell2core = mockCPU{
- name: "Haswell2Physical",
- vendorID: "GenuineIntel",
- family: 6,
- model: 63,
- modelName: "Intel(R) Xeon(R) CPU",
- bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
- physicalCores: 2,
- cores: 1,
- threadsPerCore: 1,
-}
-
-var amd8 = mockCPU{
- name: "AMD",
- vendorID: "AuthenticAMD",
- family: 23,
- model: 49,
- modelName: "AMD EPYC 7B12",
- bugs: "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass",
- physicalCores: 4,
- cores: 1,
- threadsPerCore: 2,
-}
-
-// makeCPUString makes a string formated like /proc/cpuinfo for each cpuTestCase
-func (tc mockCPU) makeCPUString() string {
- template := `processor : %d
-vendor_id : %s
-cpu family : %d
-model : %d
-model name : %s
-physical id : %d
-core id : %d
-cpu cores : %d
-bugs : %s
-`
- ret := ``
- for i := 0; i < tc.physicalCores; i++ {
- for j := 0; j < tc.cores; j++ {
- for k := 0; k < tc.threadsPerCore; k++ {
- processorNum := (i*tc.cores+j)*tc.threadsPerCore + k
- ret += fmt.Sprintf(template,
- processorNum, /*processor*/
- tc.vendorID, /*vendor_id*/
- tc.family, /*cpu family*/
- tc.model, /*model*/
- tc.modelName, /*model name*/
- i, /*physical id*/
- j, /*core id*/
- tc.cores*tc.physicalCores, /*cpu cores*/
- tc.bugs /*bugs*/)
- }
- }
- }
- return ret
-}
-
-func (tc mockCPU) makeSysPossibleString() string {
- max := tc.physicalCores * tc.cores * tc.threadsPerCore
- if max == 1 {
- return "0"
- }
- return fmt.Sprintf("0-%d", max-1)
-}
-
-// TestMockCPUSet tests mock cpu test cases against the cpuSet functions.
-func TestMockCPUSet(t *testing.T) {
- for _, tc := range []struct {
- testCase mockCPU
- isVulnerable bool
- }{
- {
- testCase: amd8,
- isVulnerable: false,
- },
- {
- testCase: haswell2,
- isVulnerable: true,
- },
- {
- testCase: haswell2core,
- isVulnerable: true,
- },
-
- {
- testCase: cascadeLake4,
- isVulnerable: true,
- },
- } {
- t.Run(tc.testCase.name, func(t *testing.T) {
- data := tc.testCase.makeCPUString()
- vulnerable := func(t thread) bool {
- return t.isVulnerable()
- }
- set, err := newCPUSet([]byte(data), vulnerable)
- if err != nil {
- t.Fatalf("Failed to ")
- }
- remaining := set.getRemainingList()
- // In the non-vulnerable case, no cores should be shutdown so all should remain.
- want := tc.testCase.physicalCores * tc.testCase.cores * tc.testCase.threadsPerCore
- if tc.isVulnerable {
- want = tc.testCase.physicalCores * tc.testCase.cores
- }
-
- if want != len(remaining) {
- t.Fatalf("Failed to shutdown the correct number of cores: want: %d got: %d", want, len(remaining))
- }
-
- if !tc.isVulnerable {
- return
- }
-
- // If the set is vulnerable, we expect only 1 thread per hyperthread pair.
- for _, r := range remaining {
- if _, ok := set[r.id]; !ok {
- t.Fatalf("Entry %+v not in map, there must be two entries in the same thread group.", r)
- }
- delete(set, r.id)
- }
-
- possible := tc.testCase.makeSysPossibleString()
- set, err = newCPUSetFromPossible([]byte(possible))
- if err != nil {
- t.Fatalf("Failed to make cpuSet: %v", err)
- }
-
- want = tc.testCase.physicalCores * tc.testCase.cores * tc.testCase.threadsPerCore
- got := len(set.getRemainingList())
- if got != want {
- t.Fatalf("Returned the wrong number of CPUs want: %d got: %d", want, got)
- }
- })
- }
-}
-
-// TestGetCPU tests basic parsing of single CPU strings from reading
-// /proc/cpuinfo.
-func TestGetCPU(t *testing.T) {
- data := `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-physical id: 0
-core id : 0
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit
-`
- want := thread{
- processorNumber: 0,
- vendorID: "GenuineIntel",
- cpuFamily: 6,
- model: 85,
- id: cpuID{
- physicalID: 0,
- coreID: 0,
- },
- bugs: map[string]struct{}{
- "cpu_meltdown": struct{}{},
- "spectre_v1": struct{}{},
- "spectre_v2": struct{}{},
- "spec_store_bypass": struct{}{},
- "l1tf": struct{}{},
- "mds": struct{}{},
- "swapgs": struct{}{},
- "taa": struct{}{},
- "itlb_multihit": struct{}{},
- },
- }
-
- got, err := newThread(data)
- if err != nil {
- t.Fatalf("getCpu failed with error: %v", err)
- }
-
- if !want.similarTo(got) {
- t.Fatalf("Failed cpus not similar: got: %+v, want: %+v", got, want)
- }
-
- if !got.isVulnerable() {
- t.Fatalf("Failed: cpu should be vulnerable.")
- }
-}
-
-func TestInvalid(t *testing.T) {
- result, err := getThreads(`something not a processor`)
- if err == nil {
- t.Fatalf("getCPU set didn't return an error: %+v", result)
- }
-
- if !strings.Contains(err.Error(), "no cpus") {
- t.Fatalf("Incorrect error returned: %v", err)
- }
-}
-
-// TestCPUSet tests getting the right number of CPUs from
-// parsing full output of /proc/cpuinfo.
-func TestCPUSet(t *testing.T) {
- data := `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:
-
-processor : 1
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 1
-initial apicid : 1
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:
-`
- cpuSet, err := getThreads(data)
- if err != nil {
- t.Fatalf("getCPUSet failed: %v", err)
- }
-
- wantCPULen := 2
- if len(cpuSet) != wantCPULen {
- t.Fatalf("Num CPU mismatch: want: %d, got: %d", wantCPULen, len(cpuSet))
- }
-
- wantCPU := thread{
- vendorID: "GenuineIntel",
- cpuFamily: 6,
- model: 63,
- bugs: map[string]struct{}{
- "cpu_meltdown": struct{}{},
- "spectre_v1": struct{}{},
- "spectre_v2": struct{}{},
- "spec_store_bypass": struct{}{},
- "l1tf": struct{}{},
- "mds": struct{}{},
- "swapgs": struct{}{},
- },
- }
-
- for _, c := range cpuSet {
- if !wantCPU.similarTo(c) {
- t.Fatalf("Failed cpus not equal: got: %+v, want: %+v", c, wantCPU)
- }
- }
-}
-
-// TestReadFile is a smoke test for parsing methods.
-func TestReadFile(t *testing.T) {
- data, err := ioutil.ReadFile("/proc/cpuinfo")
- if err != nil {
- t.Fatalf("Failed to read cpuinfo: %v", err)
- }
-
- vulnerable := func(t thread) bool {
- return t.isVulnerable()
- }
-
- set, err := newCPUSet(data, vulnerable)
- if err != nil {
- t.Fatalf("Failed to parse CPU data %v\n%s", err, data)
- }
-
- if len(set) < 1 {
- t.Fatalf("Failed to parse any CPUs: %d", len(set))
- }
-
- t.Log(set)
-}
-
-// TestVulnerable tests if the isVulnerable method is correct
-// among known CPUs in GCP.
-func TestVulnerable(t *testing.T) {
- const haswell = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 63
-model name : Intel(R) Xeon(R) CPU @ 2.30GHz
-stepping : 0
-microcode : 0x1
-cpu MHz : 2299.998
-cache size : 46080 KB
-physical id : 0
-siblings : 4
-core id : 0
-cpu cores : 2
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
-bogomips : 4599.99
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
- const skylake = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-model name : Intel(R) Xeon(R) CPU @ 2.00GHz
-stepping : 3
-microcode : 0x1
-cpu MHz : 2000.180
-cache size : 39424 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities
-bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa
-bogomips : 4000.36
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
- const cascade = `processor : 0
-vendor_id : GenuineIntel
-cpu family : 6
-model : 85
-model name : Intel(R) Xeon(R) CPU
-stepping : 7
-microcode : 0x1
-cpu MHz : 2800.198
-cache size : 33792 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2
- ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmu
-lqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowpr
-efetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid r
-tm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves a
-rat avx512_vnni md_clear arch_capabilities
-bugs : spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa
-bogomips : 5600.39
-clflush size : 64
-cache_alignment : 64
-address sizes : 46 bits physical, 48 bits virtual
-power management:`
-
- const amd = `processor : 0
-vendor_id : AuthenticAMD
-cpu family : 23
-model : 49
-model name : AMD EPYC 7B12
-stepping : 0
-microcode : 0x1000065
-cpu MHz : 2250.000
-cache size : 512 KB
-physical id : 0
-siblings : 2
-core id : 0
-cpu cores : 1
-apicid : 0
-initial apicid : 0
-fpu : yes
-fpu_exception : yes
-cpuid level : 13
-wp : yes
-flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip rdpid
-bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
-bogomips : 4500.00
-TLB size : 3072 4K pages
-clflush size : 64
-cache_alignment : 64
-address sizes : 48 bits physical, 48 bits virtual
-power management:`
-
- for _, tc := range []struct {
- name string
- cpuString string
- vulnerable bool
- }{
- {
- name: "haswell",
- cpuString: haswell,
- vulnerable: true,
- }, {
- name: "skylake",
- cpuString: skylake,
- vulnerable: true,
- }, {
- name: "amd",
- cpuString: amd,
- vulnerable: false,
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- set, err := getThreads(tc.cpuString)
- if err != nil {
- t.Fatalf("Failed to getCPUSet:%v\n %s", err, tc.cpuString)
- }
-
- if len(set) < 1 {
- t.Fatalf("Returned empty cpu set: %v", set)
- }
-
- for _, c := range set {
- got := func() bool {
- return c.isVulnerable()
- }()
-
- if got != tc.vulnerable {
- t.Fatalf("Mismatch vulnerable for cpu %+s: got %t want: %t", tc.name, tc.vulnerable, got)
- }
- }
- })
- }
-}
-
-func TestReverse(t *testing.T) {
- const noParse = "-1-"
- for _, tc := range []struct {
- name string
- output string
- wantErr error
- wantCount int
- }{
- {
- name: "base",
- output: "0-7",
- wantErr: nil,
- wantCount: 8,
- },
- {
- name: "huge",
- output: "0-111",
- wantErr: nil,
- wantCount: 112,
- },
- {
- name: "not zero",
- output: "50-53",
- wantErr: nil,
- wantCount: 4,
- },
- {
- name: "small",
- output: "0",
- wantErr: nil,
- wantCount: 1,
- },
- {
- name: "invalid order",
- output: "10-6",
- wantErr: fmt.Errorf("invalid cpu bounds from possible: begin: %d end: %d", 10, 6),
- },
- {
- name: "no parse",
- output: noParse,
- wantErr: fmt.Errorf(`mismatch regex from /sys/devices/system/cpu/possible: %q`, noParse),
- },
- } {
- t.Run(tc.name, func(t *testing.T) {
- threads, err := getThreadsFromPossible([]byte(tc.output))
-
- switch {
- case tc.wantErr == nil:
- if err != nil {
- t.Fatalf("Wanted nil err, got: %v", err)
- }
- case err == nil:
- t.Fatalf("Want error: %v got: %v", tc.wantErr, err)
- default:
- if tc.wantErr.Error() != err.Error() {
- t.Fatalf("Want error: %v got error: %v", tc.wantErr, err)
- }
- }
-
- if len(threads) != tc.wantCount {
- t.Fatalf("Want count: %d got: %d", tc.wantCount, len(threads))
- }
- })
- }
-}
-
-func TestReverseSmoke(t *testing.T) {
- data, err := ioutil.ReadFile(allPossibleCPUs)
- if err != nil {
- t.Fatalf("Failed to read from possible: %v", err)
- }
- threads, err := getThreadsFromPossible(data)
- if err != nil {
- t.Fatalf("Could not parse possible output: %v", err)
- }
-
- if len(threads) <= 0 {
- t.Fatalf("Didn't get any CPU cores: %d", len(threads))
- }
-}
diff --git a/runsc/mitigate/mitigate.go b/runsc/mitigate/mitigate.go
index 91de623e3..24f67414c 100644
--- a/runsc/mitigate/mitigate.go
+++ b/runsc/mitigate/mitigate.go
@@ -14,121 +14,440 @@
// Package mitigate provides libraries for the mitigate command. The
// mitigate command mitigates side channel attacks such as MDS. Mitigate
-// shuts down CPUs via /sys/devices/system/cpu/cpu{N}/online. In addition,
-// the mitigate also handles computing available CPU in kubernetes kube_config
-// files.
+// shuts down CPUs via /sys/devices/system/cpu/cpu{N}/online.
package mitigate
import (
"fmt"
"io/ioutil"
-
- "gvisor.dev/gvisor/pkg/log"
- "gvisor.dev/gvisor/runsc/flag"
+ "os"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
)
const (
- cpuInfo = "/proc/cpuinfo"
- allPossibleCPUs = "/sys/devices/system/cpu/possible"
+ // mds is the only bug we care about.
+ mds = "mds"
+
+ // Constants for parsing /proc/cpuinfo.
+ processorKey = "processor"
+ vendorIDKey = "vendor_id"
+ cpuFamilyKey = "cpu family"
+ modelKey = "model"
+ physicalIDKey = "physical id"
+ coreIDKey = "core id"
+ bugsKey = "bugs"
+
+ // Path to shutdown a CPU.
+ cpuOnlineTemplate = "/sys/devices/system/cpu/cpu%d/online"
)
-// Mitigate handles high level mitigate operations provided to runsc.
-type Mitigate struct {
- dryRun bool // Run the command without changing the underlying system.
- reverse bool // Reverse mitigate by turning on all CPU cores.
- other mitigate // Struct holds extra mitigate logic.
- path string // path to read for each operation (e.g. /proc/cpuinfo).
+// CPUSet contains a map of all CPUs on the system, keyed by physical and
+// core ID. Threads that share a physical and core ID are hyperthread pairs.
+type CPUSet map[threadID]*ThreadGroup
+
+// NewCPUSet creates a CPUSet from data read from /proc/cpuinfo.
+func NewCPUSet(data []byte, vulnerable func(Thread) bool) (CPUSet, error) {
+ processors, err := getThreads(string(data))
+ if err != nil {
+ return nil, err
+ }
+
+ set := make(CPUSet)
+ for _, p := range processors {
+ // Each ID is of the form physicalID:coreID. Hyperthread pairs
+ // have identical physical and core IDs. We need to match
+ // Hyperthread pairs so that we can shutdown all but one per
+ // pair.
+ core, ok := set[p.id]
+ if !ok {
+ core = &ThreadGroup{}
+ set[p.id] = core
+ }
+ core.isVulnerable = core.isVulnerable || vulnerable(p)
+ core.threads = append(core.threads, p)
+ }
+
+	// Sort threads so the lowest-numbered processor comes first in each
+	// thread group; that is the thread kept online for vulnerable pairs.
+ for _, tg := range set {
+ sort.Slice(tg.threads, func(i, j int) bool {
+ return tg.threads[i].processorNumber < tg.threads[j].processorNumber
+ })
+ }
+ return set, nil
}
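
For illustration, a minimal sketch of the grouping above (not part of this change): it feeds NewCPUSet a synthetic two-thread /proc/cpuinfo blob in which both threads share physical id 0 and core id 0, so they collapse into a single hyperthread pair. The cpuinfo text and the output comments are assumptions made only for this example.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/mitigate"
)

// cpuinfo is synthetic: two logical CPUs on the same physical id / core id,
// i.e. one hyperthread pair, both reporting the mds bug.
const cpuinfo = `processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 85
physical id : 0
core id : 0
bugs : mds swapgs

processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 85
physical id : 0
core id : 0
bugs : mds swapgs
`

func main() {
	set, err := mitigate.NewCPUSet([]byte(cpuinfo), func(t mitigate.Thread) bool {
		return t.IsVulnerable() // vulnerable means "mds" appears in bugs.
	})
	if err != nil {
		panic(err)
	}
	// Both threads land in one thread group, so one stays and one is
	// scheduled for shutdown.
	fmt.Println("groups:", len(set))                       // 1
	fmt.Println("remaining:", len(set.GetRemainingList())) // 1
	fmt.Println("shutdown:", len(set.GetShutdownList()))   // 1
}
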
-// Usage implments Usage for cmd.Mitigate.
-func (m Mitigate) Usage() string {
- usageString := `mitigate [flags]
+// NewCPUSetFromPossible makes a CPUSet from data read from
+// /sys/devices/system/cpu/possible. This is used in enable operations
+// where the caller simply wants to enable all CPUs.
+func NewCPUSetFromPossible(data []byte) (CPUSet, error) {
+ threads, err := GetThreadsFromPossible(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // We don't care if a CPU is vulnerable or not, we just
+ // want to return a list of all CPUs on the host.
+ set := CPUSet{
+ threads[0].id: &ThreadGroup{
+ threads: threads,
+ isVulnerable: false,
+ },
+ }
+ return set, nil
+}
-Mitigate mitigates a system to the "MDS" vulnerability by implementing a manual shutdown of SMT. The command checks /proc/cpuinfo for cpus having the MDS vulnerability, and if found, shutdown all but one CPU per hyperthread pair via /sys/devices/system/cpu/cpu{N}/online. CPUs can be restored by writing "2" to each file in /sys/devices/system/cpu/cpu{N}/online or performing a system reboot.
+// String implements the String method for CPUSet.
+func (c CPUSet) String() string {
+ ret := ""
+ for _, tg := range c {
+ ret += fmt.Sprintf("%s\n", tg)
+ }
+ return ret
+}
-The command can be reversed with --reverse, which reads the total CPUs from /sys/devices/system/cpu/possible and enables all with /sys/devices/system/cpu/cpu{N}/online.
-`
- return usageString + m.other.usage()
+// GetRemainingList returns the list of threads that will remain active
+// after mitigation.
+func (c CPUSet) GetRemainingList() []Thread {
+ threads := make([]Thread, 0, len(c))
+ for _, core := range c {
+ // If we're vulnerable, take only one thread from the pair.
+ if core.isVulnerable {
+ threads = append(threads, core.threads[0])
+ continue
+ }
+ // Otherwise don't shutdown anything.
+ threads = append(threads, core.threads...)
+ }
+ return threads
}
-// SetFlags sets flags for the command Mitigate.
-func (m Mitigate) SetFlags(f *flag.FlagSet) {
- f.BoolVar(&m.dryRun, "dryrun", false, "run the command without changing system")
- f.BoolVar(&m.reverse, "reverse", false, "reverse mitigate by enabling all CPUs")
- m.other.setFlags(f)
- m.path = cpuInfo
- if m.reverse {
- m.path = allPossibleCPUs
+// GetShutdownList returns the list of threads that will be shut down by
+// mitigation.
+func (c CPUSet) GetShutdownList() []Thread {
+ threads := make([]Thread, 0)
+ for _, core := range c {
+	// Only if we're vulnerable do we shut down anything. In that case,
+	// shut down all but the first entry.
+ if core.isVulnerable && len(core.threads) > 1 {
+ threads = append(threads, core.threads[1:]...)
+ }
}
+ return threads
}
-// Execute executes the Mitigate command.
-func (m Mitigate) Execute() error {
- data, err := ioutil.ReadFile(m.path)
- if err != nil {
- return fmt.Errorf("failed to read %s: %v", m.path, err)
+// ThreadGroup represents Hyperthread pairs on the same physical/core ID.
+type ThreadGroup struct {
+ threads []Thread
+ isVulnerable bool
+}
+
+// String implements the String method for ThreadGroup.
+func (c ThreadGroup) String() string {
+ ret := fmt.Sprintf("ThreadGroup:\nIsVulnerable: %t\n", c.isVulnerable)
+ for _, processor := range c.threads {
+ ret += fmt.Sprintf("%s\n", processor)
}
+ return ret
+}
- if m.reverse {
- err := m.doReverse(data)
+// getThreads returns Thread structs parsed from /proc/cpuinfo contents.
+func getThreads(data string) ([]Thread, error) {
+	// Each processor entry should start with the
+	// processor key. Find the beginnings of each.
+ r := buildRegex(processorKey, `\d+`)
+ indices := r.FindAllStringIndex(data, -1)
+ if len(indices) < 1 {
+ return nil, fmt.Errorf("no cpus found for: %q", data)
+ }
+
+ // Add the ending index for last entry.
+ indices = append(indices, []int{len(data), -1})
+
+	// Valid cpus are now defined by strings in between
+	// indices (e.g. data[index[i], index[i+1]]).
+	// There should be len(indices) - 1 CPUs
+	// since the last index is the end of the string.
+ cpus := make([]Thread, 0, len(indices))
+	// Find each string that represents a CPU. These begin with "processor".
+ for i := 1; i < len(indices); i++ {
+ start := indices[i-1][0]
+ end := indices[i][0]
+ // Parse the CPU entry, which should be between start/end.
+ c, err := newThread(data[start:end])
if err != nil {
- return fmt.Errorf("reverse operation failed: %v", err)
+ return nil, err
}
- return nil
+ cpus = append(cpus, c)
+ }
+ return cpus, nil
+}
+
+// GetThreadsFromPossible makes threads from data read from /sys/devices/system/cpu/possible.
+func GetThreadsFromPossible(data []byte) ([]Thread, error) {
+ possibleRegex := regexp.MustCompile(`(?m)^(\d+)(-(\d+))?$`)
+ matches := possibleRegex.FindStringSubmatch(string(data))
+ if len(matches) != 4 {
+ return nil, fmt.Errorf("mismatch regex from possible: %q", string(data))
+ }
+
+ // If matches[3] is empty, we only have one cpu entry.
+ if matches[3] == "" {
+ matches[3] = matches[1]
}
- set, err := m.doMitigate(data)
+ begin, err := strconv.ParseInt(matches[1], 10, 64)
if err != nil {
- return fmt.Errorf("mitigate operation failed: %v", err)
+ return nil, fmt.Errorf("failed to parse begin: %v", err)
}
- return m.other.execute(set, m.dryRun)
+ end, err := strconv.ParseInt(matches[3], 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse end: %v", err)
+ }
+ if begin > end || begin < 0 || end < 0 {
+ return nil, fmt.Errorf("invalid cpu bounds from possible: begin: %d end: %d", begin, end)
+ }
+
+ ret := make([]Thread, 0, end-begin)
+ for i := begin; i <= end; i++ {
+ ret = append(ret, Thread{
+ processorNumber: i,
+ id: threadID{
+ physicalID: 0, // we don't care about id for enable ops.
+ coreID: 0,
+ },
+ })
+ }
+
+ return ret, nil
+}
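
A quick check of the range parsing above, again as a sketch outside this change: GetThreadsFromPossible expands the "N-M" form used by /sys/devices/system/cpu/possible into one Thread per processor number.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/mitigate"
)

func main() {
	// The possible file holds either a single number ("0") or a range ("0-7").
	threads, err := mitigate.GetThreadsFromPossible([]byte("0-7\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(threads)) // 8: processor numbers 0 through 7.
}
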
+
+// threadID for each thread is defined by the physical and
+// core IDs. Two threads with equal threadIDs are hyperthread pairs.
+type threadID struct {
+ physicalID int64
+ coreID int64
}
-func (m Mitigate) doMitigate(data []byte) (cpuSet, error) {
- set, err := newCPUSet(data, m.other.vulnerable)
+// Thread represents pertinent info about a single hyperthread in a pair.
+type Thread struct {
+ processorNumber int64 // the processor number of this CPU.
+ vendorID string // the vendorID of CPU (e.g. AuthenticAMD).
+ cpuFamily int64 // CPU family number (e.g. 6 for CascadeLake/Skylake).
+ model int64 // CPU model number (e.g. 85 for CascadeLake/Skylake).
+ id threadID // id for this thread
+ bugs map[string]struct{} // map of vulnerabilities parsed from the 'bugs' field.
+}
+
+// newThread parses a CPU from a single cpu entry from /proc/cpuinfo.
+func newThread(data string) (Thread, error) {
+ empty := Thread{}
+ processor, err := parseProcessor(data)
if err != nil {
- return nil, err
+ return empty, err
}
- log.Infof("Mitigate found the following CPUs...")
- log.Infof("%s", set)
+ vendorID, err := parseVendorID(data)
+ if err != nil {
+ return empty, err
+ }
- disableList := set.getShutdownList()
- log.Infof("Disabling threads on thread pairs.")
- for _, t := range disableList {
- log.Infof("Disable thread: %s", t)
- if m.dryRun {
- continue
- }
- if err := t.disable(); err != nil {
- return nil, fmt.Errorf("error disabling thread: %s err: %v", t, err)
- }
+ cpuFamily, err := parseCPUFamily(data)
+ if err != nil {
+ return empty, err
}
- log.Infof("Shutdown successful.")
- return set, nil
+
+ model, err := parseModel(data)
+ if err != nil {
+ return empty, err
+ }
+
+ physicalID, err := parsePhysicalID(data)
+ if err != nil {
+ return empty, err
+ }
+
+ coreID, err := parseCoreID(data)
+ if err != nil {
+ return empty, err
+ }
+
+ bugs, err := parseBugs(data)
+ if err != nil {
+ return empty, err
+ }
+
+ return Thread{
+ processorNumber: processor,
+ vendorID: vendorID,
+ cpuFamily: cpuFamily,
+ model: model,
+ id: threadID{
+ physicalID: physicalID,
+ coreID: coreID,
+ },
+ bugs: bugs,
+ }, nil
+}
+
+// String implements the String method for Thread.
+func (t Thread) String() string {
+ template := `CPU: %d
+CPU ID: %+v
+Vendor: %s
+Family/Model: %d/%d
+Bugs: %s
+`
+ bugs := make([]string, 0)
+ for bug := range t.bugs {
+ bugs = append(bugs, bug)
+ }
+
+ return fmt.Sprintf(template, t.processorNumber, t.id, t.vendorID, t.cpuFamily, t.model, strings.Join(bugs, ","))
+}
+
+// Enable turns on the CPU by writing 1 to /sys/devices/system/cpu/cpu{N}/online.
+func (t Thread) Enable() error {
+ // Linux ensures that "cpu0" is always online.
+ if t.processorNumber == 0 {
+ return nil
+ }
+ cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
+ f, err := os.OpenFile(cpuPath, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return fmt.Errorf("failed to open file %s: %v", cpuPath, err)
+ }
+ if _, err = f.Write([]byte{'1'}); err != nil {
+ return fmt.Errorf("failed to write '1' to %s: %v", cpuPath, err)
+ }
+ return nil
+}
+
+// Disable turns off the CPU by writing 0 to /sys/devices/system/cpu/cpu{N}/online.
+func (t Thread) Disable() error {
+ // The core labeled "cpu0" can never be taken offline via this method.
+ // Linux will return EPERM if the user even creates a file at the /sys
+ // path above.
+ if t.processorNumber == 0 {
+ return fmt.Errorf("invalid shutdown operation: cpu0 cannot be disabled")
+ }
+ cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
+ return ioutil.WriteFile(cpuPath, []byte{'0'}, 0644)
}
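
How a caller drives Disable is shown by the Execute loop removed above; the helper below re-creates that loop as a sketch. The mitigateOnce name and the dryRun guard are hypothetical, and Disable itself needs root since it writes to sysfs.

package main

import (
	"fmt"

	"gvisor.dev/gvisor/runsc/mitigate"
)

// mitigateOnce disables the sibling of every vulnerable hyperthread pair,
// mirroring the loop that previously lived in Mitigate.Execute. dryRun is a
// hypothetical guard: when true, nothing is written under /sys.
func mitigateOnce(set mitigate.CPUSet, dryRun bool) error {
	for _, t := range set.GetShutdownList() {
		fmt.Printf("disable thread:\n%s", t)
		if dryRun {
			continue
		}
		if err := t.Disable(); err != nil { // writes '0' to the online file.
			return fmt.Errorf("disabling thread %s: %v", t, err)
		}
	}
	return nil
}

func main() {}
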
-func (m Mitigate) doReverse(data []byte) error {
- set, err := newCPUSetFromPossible(data)
+// IsVulnerable checks if a CPU is vulnerable to mds.
+func (t Thread) IsVulnerable() bool {
+ _, ok := t.bugs[mds]
+ return ok
+}
+
+// isActive checks if a CPU is active by reading /sys/devices/system/cpu/cpu{N}/online.
+// If the file does not exist (ioutil returns an error), we assume the CPU is on.
+func (t Thread) isActive() bool {
+ cpuPath := fmt.Sprintf(cpuOnlineTemplate, t.processorNumber)
+ data, err := ioutil.ReadFile(cpuPath)
if err != nil {
- return err
+ return true
}
+ return len(data) > 0 && data[0] != '0'
+}
- log.Infof("Reverse mitigate found the following CPUs...")
- log.Infof("%s", set)
+// SimilarTo checks vendor/family/model/bugs fields for equality of two
+// processors.
+func (t Thread) SimilarTo(other Thread) bool {
+ if t.vendorID != other.vendorID {
+ return false
+ }
- enableList := set.getRemainingList()
+ if other.cpuFamily != t.cpuFamily {
+ return false
+ }
- log.Infof("Enabling all CPUs...")
- for _, t := range enableList {
- log.Infof("Enabling thread: %s", t)
- if m.dryRun {
- continue
- }
- if err := t.enable(); err != nil {
- return fmt.Errorf("error enabling thread: %s err: %v", t, err)
+ if other.model != t.model {
+ return false
+ }
+
+ if len(other.bugs) != len(t.bugs) {
+ return false
+ }
+
+ for bug := range t.bugs {
+ if _, ok := other.bugs[bug]; !ok {
+ return false
}
}
- log.Infof("Enable successful.")
- return nil
+ return true
+}
+
+// parseProcessor grabs the processor field from /proc/cpuinfo output.
+func parseProcessor(data string) (int64, error) {
+ return parseIntegerResult(data, processorKey)
+}
+
+// parseVendorID grabs the vendor_id field from /proc/cpuinfo output.
+func parseVendorID(data string) (string, error) {
+ return parseRegex(data, vendorIDKey, `[\w\d]+`)
+}
+
+// parseCPUFamily grabs the cpu family field from /proc/cpuinfo output.
+func parseCPUFamily(data string) (int64, error) {
+ return parseIntegerResult(data, cpuFamilyKey)
+}
+
+// parseModel grabs the model field from /proc/cpuinfo output.
+func parseModel(data string) (int64, error) {
+ return parseIntegerResult(data, modelKey)
+}
+
+// parsePhysicalID parses the physical id field.
+func parsePhysicalID(data string) (int64, error) {
+ return parseIntegerResult(data, physicalIDKey)
+}
+
+// parseCoreID parses the core id field.
+func parseCoreID(data string) (int64, error) {
+ return parseIntegerResult(data, coreIDKey)
+}
+
+// parseBugs grabs the bugs field from /proc/cpuinfo output.
+func parseBugs(data string) (map[string]struct{}, error) {
+ result, err := parseRegex(data, bugsKey, `[\d\w\s]*`)
+ if err != nil {
+ return nil, err
+ }
+ bugs := strings.Split(result, " ")
+ ret := make(map[string]struct{}, len(bugs))
+ for _, bug := range bugs {
+ ret[bug] = struct{}{}
+ }
+ return ret, nil
+}
+
+// parseIntegerResult parses fields expecting an integer.
+func parseIntegerResult(data, key string) (int64, error) {
+ result, err := parseRegex(data, key, `\d+`)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseInt(result, 0, 64)
+}
+
+// buildRegex builds a regex for parsing each CPU field.
+func buildRegex(key, match string) *regexp.Regexp {
+ reg := fmt.Sprintf(`(?m)^%s\s*:\s*(.*)$`, key)
+ return regexp.MustCompile(reg)
+}
+
+// parseRegex parses data with key inserted into a standard regex template.
+func parseRegex(data, key, match string) (string, error) {
+ r := buildRegex(key, match)
+ matches := r.FindStringSubmatch(data)
+ if len(matches) < 2 {
+ return "", fmt.Errorf("failed to match key %q: %q", key, data)
+ }
+ return matches[1], nil
}
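
The field parsers above all go through the same anchored template that buildRegex produces; this standalone sketch applies that template directly with the standard regexp package (the vendor_id line is synthetic).

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The same shape buildRegex generates: key, optional whitespace, colon, value.
	r := regexp.MustCompile(`(?m)^vendor_id\s*:\s*(.*)$`)
	line := "vendor_id : AuthenticAMD\n"
	m := r.FindStringSubmatch(line)
	if len(m) < 2 {
		panic("no match")
	}
	fmt.Println(m[1]) // AuthenticAMD
}
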
diff --git a/runsc/mitigate/mitigate_test.go b/runsc/mitigate/mitigate_test.go
index b3a9a9b18..fbd8eb886 100644
--- a/runsc/mitigate/mitigate_test.go
+++ b/runsc/mitigate/mitigate_test.go
@@ -17,138 +17,519 @@ package mitigate
import (
"fmt"
"io/ioutil"
- "os"
"strings"
"testing"
+
+ "gvisor.dev/gvisor/runsc/mitigate/mock"
)
-type executeTestCase struct {
- name string
- mitigateData string
- mitigateError error
- reverseData string
- reverseError error
+// TestMockCPUSet tests mock cpu test cases against the cpuSet functions.
+func TestMockCPUSet(t *testing.T) {
+ for _, tc := range []struct {
+ testCase mock.CPU
+ isVulnerable bool
+ }{
+ {
+ testCase: mock.AMD8,
+ isVulnerable: false,
+ },
+ {
+ testCase: mock.Haswell2,
+ isVulnerable: true,
+ },
+ {
+ testCase: mock.Haswell2core,
+ isVulnerable: true,
+ },
+ {
+ testCase: mock.CascadeLake2,
+ isVulnerable: true,
+ },
+ {
+ testCase: mock.CascadeLake4,
+ isVulnerable: true,
+ },
+ } {
+ t.Run(tc.testCase.Name, func(t *testing.T) {
+ data := tc.testCase.MakeCPUString()
+ vulnerable := func(t Thread) bool {
+ return t.IsVulnerable()
+ }
+ set, err := NewCPUSet([]byte(data), vulnerable)
+ if err != nil {
+ t.Fatalf("Failed to create cpuSet: %v", err)
+ }
+
+ for _, tg := range set {
+ if err := checkSorted(tg.threads); err != nil {
+ t.Fatalf("Failed to sort cpuSet: %v", err)
+ }
+ }
+
+ remaining := set.GetRemainingList()
+ // In the non-vulnerable case, no cores should be shutdown so all should remain.
+ want := tc.testCase.PhysicalCores * tc.testCase.Cores * tc.testCase.ThreadsPerCore
+ if tc.isVulnerable {
+ want = tc.testCase.PhysicalCores * tc.testCase.Cores
+ }
+
+ if want != len(remaining) {
+ t.Fatalf("Failed to shutdown the correct number of cores: want: %d got: %d", want, len(remaining))
+ }
+
+ if !tc.isVulnerable {
+ return
+ }
+
+ // If the set is vulnerable, we expect only 1 thread per hyperthread pair.
+ for _, r := range remaining {
+ if _, ok := set[r.id]; !ok {
+ t.Fatalf("Entry %+v not in map, there must be two entries in the same thread group.", r)
+ }
+ delete(set, r.id)
+ }
+
+ possible := tc.testCase.MakeSysPossibleString()
+ set, err = NewCPUSetFromPossible([]byte(possible))
+ if err != nil {
+ t.Fatalf("Failed to make cpuSet: %v", err)
+ }
+
+ want = tc.testCase.PhysicalCores * tc.testCase.Cores * tc.testCase.ThreadsPerCore
+ got := len(set.GetRemainingList())
+ if got != want {
+ t.Fatalf("Returned the wrong number of CPUs want: %d got: %d", want, got)
+ }
+ })
+ }
}
-func TestExecute(t *testing.T) {
+// TestGetCPU tests basic parsing of single CPU strings from reading
+// /proc/cpuinfo.
+func TestGetCPU(t *testing.T) {
+ data := `processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 85
+physical id: 0
+core id : 0
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa itlb_multihit
+`
+ want := Thread{
+ processorNumber: 0,
+ vendorID: "GenuineIntel",
+ cpuFamily: 6,
+ model: 85,
+ id: threadID{
+ physicalID: 0,
+ coreID: 0,
+ },
+ bugs: map[string]struct{}{
+ "cpu_meltdown": struct{}{},
+ "spectre_v1": struct{}{},
+ "spectre_v2": struct{}{},
+ "spec_store_bypass": struct{}{},
+ "l1tf": struct{}{},
+ "mds": struct{}{},
+ "swapgs": struct{}{},
+ "taa": struct{}{},
+ "itlb_multihit": struct{}{},
+ },
+ }
- partial := `processor : 1
-vendor_id : AuthenticAMD
-cpu family : 23
-model : 49
-model name : AMD EPYC 7B12
-physical id : 0
-bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
+ got, err := newThread(data)
+ if err != nil {
+ t.Fatalf("getCpu failed with error: %v", err)
+ }
+
+ if !want.SimilarTo(got) {
+ t.Fatalf("Failed cpus not similar: got: %+v, want: %+v", got, want)
+ }
+
+ if !got.IsVulnerable() {
+ t.Fatalf("Failed: cpu should be vulnerable.")
+ }
+}
+
+func TestInvalid(t *testing.T) {
+ result, err := getThreads(`something not a processor`)
+ if err == nil {
+ t.Fatalf("getCPU set didn't return an error: %+v", result)
+ }
+
+ if !strings.Contains(err.Error(), "no cpus") {
+ t.Fatalf("Incorrect error returned: %v", err)
+ }
+}
+
+// TestCPUSet tests getting the right number of CPUs from
+// parsing full output of /proc/cpuinfo.
+func TestCPUSet(t *testing.T) {
+ data := `processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:
+
+processor : 1
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 1
+initial apicid : 1
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
power management:
`
+ cpuSet, err := getThreads(data)
+ if err != nil {
+ t.Fatalf("getCPUSet failed: %v", err)
+ }
- for _, tc := range []executeTestCase{
- {
- name: "CascadeLake4",
- mitigateData: cascadeLake4.makeCPUString(),
- reverseData: cascadeLake4.makeSysPossibleString(),
- },
- {
- name: "Empty",
- mitigateData: "",
- mitigateError: fmt.Errorf(`mitigate operation failed: no cpus found for: ""`),
- reverseData: "",
- reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from %s: ""`, allPossibleCPUs),
+ wantCPULen := 2
+ if len(cpuSet) != wantCPULen {
+ t.Fatalf("Num CPU mismatch: want: %d, got: %d", wantCPULen, len(cpuSet))
+ }
+
+ wantCPU := Thread{
+ vendorID: "GenuineIntel",
+ cpuFamily: 6,
+ model: 63,
+ bugs: map[string]struct{}{
+ "cpu_meltdown": struct{}{},
+ "spectre_v1": struct{}{},
+ "spectre_v2": struct{}{},
+ "spec_store_bypass": struct{}{},
+ "l1tf": struct{}{},
+ "mds": struct{}{},
+ "swapgs": struct{}{},
},
- {
- name: "Partial",
- mitigateData: `processor : 0
+ }
+
+ for _, c := range cpuSet {
+ if !wantCPU.SimilarTo(c) {
+ t.Fatalf("Failed cpus not equal: got: %+v, want: %+v", c, wantCPU)
+ }
+ }
+}
+
+// TestReadFile is a smoke test for parsing methods.
+func TestReadFile(t *testing.T) {
+ data, err := ioutil.ReadFile("/proc/cpuinfo")
+ if err != nil {
+ t.Fatalf("Failed to read cpuinfo: %v", err)
+ }
+
+ vulnerable := func(t Thread) bool {
+ return t.IsVulnerable()
+ }
+
+ set, err := NewCPUSet(data, vulnerable)
+ if err != nil {
+ t.Fatalf("Failed to parse CPU data %v\n%s", err, data)
+ }
+
+ for _, tg := range set {
+ if err := checkSorted(tg.threads); err != nil {
+ t.Fatalf("Failed to sort cpuSet: %v", err)
+ }
+ }
+
+ if len(set) < 1 {
+ t.Fatalf("Failed to parse any CPUs: %d", len(set))
+ }
+
+ t.Log(set)
+}
+
+// TestVulnerable tests if the IsVulnerable method is correct
+// among known CPUs in GCP.
+func TestVulnerable(t *testing.T) {
+ const haswell = `processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 63
+model name : Intel(R) Xeon(R) CPU @ 2.30GHz
+stepping : 0
+microcode : 0x1
+cpu MHz : 2299.998
+cache size : 46080 KB
+physical id : 0
+siblings : 4
+core id : 0
+cpu cores : 2
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid xsaveopt arat md_clear arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs
+bogomips : 4599.99
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:`
+
+ const skylake = `processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 85
+model name : Intel(R) Xeon(R) CPU @ 2.00GHz
+stepping : 3
+microcode : 0x1
+cpu MHz : 2000.180
+cache size : 39424 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single pti ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves arat md_clear arch_capabilities
+bugs : cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs taa
+bogomips : 4000.36
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:`
+
+ const cascade = `processor : 0
+vendor_id : GenuineIntel
+cpu family : 6
+model : 85
+model name : Intel(R) Xeon(R) CPU
+stepping : 7
+microcode : 0x1
+cpu MHz : 2800.198
+cache size : 33792 KB
+physical id : 0
+siblings : 2
+core id : 0
+cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2
+ ss ht syscall nx pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid tsc_known_freq pni pclmu
+lqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowpr
+efetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid r
+tm mpx avx512f avx512dq rdseed adx smap clflushopt clwb avx512cd avx512bw avx512vl xsaveopt xsavec xgetbv1 xsaves a
+rat avx512_vnni md_clear arch_capabilities
+bugs : spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa
+bogomips : 5600.39
+clflush size : 64
+cache_alignment : 64
+address sizes : 46 bits physical, 48 bits virtual
+power management:`
+
+ const amd = `processor : 0
vendor_id : AuthenticAMD
cpu family : 23
model : 49
model name : AMD EPYC 7B12
+stepping : 0
+microcode : 0x1000065
+cpu MHz : 2250.000
+cache size : 512 KB
physical id : 0
+siblings : 2
core id : 0
cpu cores : 1
+apicid : 0
+initial apicid : 0
+fpu : yes
+fpu_exception : yes
+cpuid level : 13
+wp : yes
+flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl xtopology nonstop_tsc cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw topoext ssbd ibrs ibpb stibp vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt clwb sha_ni xsaveopt xsavec xgetbv1 clzero xsaveerptr arat npt nrip_save umip rdpid
bugs : sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass
-power management:
+bogomips : 4500.00
+TLB size : 3072 4K pages
+clflush size : 64
+cache_alignment : 64
+address sizes : 48 bits physical, 48 bits virtual
+power management:`
-` + partial,
- mitigateError: fmt.Errorf(`mitigate operation failed: failed to match key "core id": %q`, partial),
- reverseData: "1-",
- reverseError: fmt.Errorf(`reverse operation failed: mismatch regex from %s: %q`, allPossibleCPUs, "1-"),
+ for _, tc := range []struct {
+ name string
+ cpuString string
+ vulnerable bool
+ }{
+ {
+ name: "haswell",
+ cpuString: haswell,
+ vulnerable: true,
+ }, {
+ name: "skylake",
+ cpuString: skylake,
+ vulnerable: true,
+ }, {
+ name: "amd",
+ cpuString: amd,
+ vulnerable: false,
},
} {
- doExecuteTest(t, Mitigate{}, tc)
+ t.Run(tc.name, func(t *testing.T) {
+ set, err := getThreads(tc.cpuString)
+ if err != nil {
+ t.Fatalf("Failed to getCPUSet:%v\n %s", err, tc.cpuString)
+ }
+
+ if len(set) < 1 {
+ t.Fatalf("Returned empty cpu set: %v", set)
+ }
+
+ for _, c := range set {
+ got := func() bool {
+ return c.IsVulnerable()
+ }()
+
+ if got != tc.vulnerable {
+ t.Fatalf("Mismatch vulnerable for cpu %+s: got %t want: %t", tc.name, tc.vulnerable, got)
+ }
+ }
+ })
}
}
-func TestExecuteSmoke(t *testing.T) {
- smokeMitigate, err := ioutil.ReadFile(cpuInfo)
+func TestReverse(t *testing.T) {
+ const noParse = "-1-"
+ for _, tc := range []struct {
+ name string
+ output string
+ wantErr error
+ wantCount int
+ }{
+ {
+ name: "base",
+ output: "0-7",
+ wantErr: nil,
+ wantCount: 8,
+ },
+ {
+ name: "huge",
+ output: "0-111",
+ wantErr: nil,
+ wantCount: 112,
+ },
+ {
+ name: "not zero",
+ output: "50-53",
+ wantErr: nil,
+ wantCount: 4,
+ },
+ {
+ name: "small",
+ output: "0",
+ wantErr: nil,
+ wantCount: 1,
+ },
+ {
+ name: "invalid order",
+ output: "10-6",
+ wantErr: fmt.Errorf("invalid cpu bounds from possible: begin: %d end: %d", 10, 6),
+ },
+ {
+ name: "no parse",
+ output: noParse,
+ wantErr: fmt.Errorf(`mismatch regex from possible: %q`, noParse),
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ threads, err := GetThreadsFromPossible([]byte(tc.output))
+
+ switch {
+ case tc.wantErr == nil:
+ if err != nil {
+ t.Fatalf("Wanted nil err, got: %v", err)
+ }
+ case err == nil:
+ t.Fatalf("Want error: %v got: %v", tc.wantErr, err)
+ default:
+ if tc.wantErr.Error() != err.Error() {
+ t.Fatalf("Want error: %v got error: %v", tc.wantErr, err)
+ }
+ }
+
+ if len(threads) != tc.wantCount {
+ t.Fatalf("Want count: %d got: %d", tc.wantCount, len(threads))
+ }
+ })
+ }
+}
+
+func TestReverseSmoke(t *testing.T) {
+ data, err := ioutil.ReadFile("/sys/devices/system/cpu/possible")
if err != nil {
- t.Fatalf("Failed to read %s: %v", cpuInfo, err)
+ t.Fatalf("Failed to read from possible: %v", err)
}
- smokeReverse, err := ioutil.ReadFile(allPossibleCPUs)
+ threads, err := GetThreadsFromPossible(data)
if err != nil {
- t.Fatalf("Failed to read %s: %v", allPossibleCPUs, err)
+ t.Fatalf("Could not parse possible output: %v", err)
}
- doExecuteTest(t, Mitigate{}, executeTestCase{
- name: "SmokeTest",
- mitigateData: string(smokeMitigate),
- reverseData: string(smokeReverse),
- })
+ if len(threads) <= 0 {
+ t.Fatalf("Didn't get any CPU cores: %d", len(threads))
+ }
}
-// doExecuteTest runs Execute with the mitigate operation and reverse operation.
-func doExecuteTest(t *testing.T, m Mitigate, tc executeTestCase) {
- t.Run("Mitigate"+tc.name, func(t *testing.T) {
- m.dryRun = true
- file, err := ioutil.TempFile("", "outfile.txt")
- if err != nil {
- t.Fatalf("Failed to create tmpfile: %v", err)
- }
- defer os.Remove(file.Name())
-
- if _, err := file.WriteString(tc.mitigateData); err != nil {
- t.Fatalf("Failed to write to file: %v", err)
- }
-
- m.path = file.Name()
-
- got := m.Execute()
- if err = checkErr(tc.mitigateError, got); err != nil {
- t.Fatalf("Mitigate error mismatch: %v", err)
- }
- })
- t.Run("Reverse"+tc.name, func(t *testing.T) {
- m.dryRun = true
- m.reverse = true
-
- file, err := ioutil.TempFile("", "outfile.txt")
- if err != nil {
- t.Fatalf("Failed to create tmpfile: %v", err)
- }
- defer os.Remove(file.Name())
-
- if _, err := file.WriteString(tc.reverseData); err != nil {
- t.Fatalf("Failed to write to file: %v", err)
- }
-
- m.path = file.Name()
- got := m.Execute()
- if err = checkErr(tc.reverseError, got); err != nil {
- t.Fatalf("Mitigate error mismatch: %v", err)
+func checkSorted(threads []Thread) error {
+ if len(threads) < 2 {
+ return nil
+ }
+ last := threads[0].processorNumber
+ for _, t := range threads[1:] {
+ if last >= t.processorNumber {
+ return fmt.Errorf("threads out of order: thread %d before %d", t.processorNumber, last)
}
- })
-
-}
-
-// checkErr checks error for equality.
-func checkErr(want, got error) error {
- switch {
- case want == nil && got == nil:
- case want != nil && got == nil:
- fallthrough
- case want == nil && got != nil:
- fallthrough
- case want.Error() != strings.Trim(got.Error(), " "):
- return fmt.Errorf("got: %v want: %v", got, want)
+ last = t.processorNumber
}
return nil
}
diff --git a/runsc/mitigate/mock/BUILD b/runsc/mitigate/mock/BUILD
new file mode 100644
index 000000000..5019ff9ee
--- /dev/null
+++ b/runsc/mitigate/mock/BUILD
@@ -0,0 +1,11 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "mock",
+ srcs = ["mock.go"],
+ visibility = [
+ "//runsc:__subpackages__",
+ ],
+)
diff --git a/runsc/mitigate/mock/mock.go b/runsc/mitigate/mock/mock.go
new file mode 100644
index 000000000..2db718cb9
--- /dev/null
+++ b/runsc/mitigate/mock/mock.go
@@ -0,0 +1,141 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package mock contains mock CPUs for mitigate tests.
+package mock
+
+import "fmt"
+
+// CPU represents data from CPUs that will be mitigated.
+type CPU struct {
+ Name string
+ VendorID string
+ Family int
+ Model int
+ ModelName string
+ Bugs string
+ PhysicalCores int
+ Cores int
+ ThreadsPerCore int
+}
+
+// CascadeLake2 is a two core Intel CascadeLake machine.
+var CascadeLake2 = CPU{
+ Name: "CascadeLake",
+ VendorID: "GenuineIntel",
+ Family: 6,
+ Model: 85,
+ ModelName: "Intel(R) Xeon(R) CPU",
+ Bugs: "spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa",
+ PhysicalCores: 1,
+ Cores: 1,
+ ThreadsPerCore: 2,
+}
+
+// CascadeLake4 is a four core Intel CascadeLake machine.
+var CascadeLake4 = CPU{
+ Name: "CascadeLake",
+ VendorID: "GenuineIntel",
+ Family: 6,
+ Model: 85,
+ ModelName: "Intel(R) Xeon(R) CPU",
+ Bugs: "spectre_v1 spectre_v2 spec_store_bypass mds swapgs taa",
+ PhysicalCores: 1,
+ Cores: 2,
+ ThreadsPerCore: 2,
+}
+
+// Haswell2 is a two core Intel Haswell machine.
+var Haswell2 = CPU{
+ Name: "Haswell",
+ VendorID: "GenuineIntel",
+ Family: 6,
+ Model: 63,
+ ModelName: "Intel(R) Xeon(R) CPU",
+ Bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
+ PhysicalCores: 1,
+ Cores: 1,
+ ThreadsPerCore: 2,
+}
+
+// Haswell2core is a 2 core Intel Haswell machine with no hyperthread pairs.
+var Haswell2core = CPU{
+ Name: "Haswell2Physical",
+ VendorID: "GenuineIntel",
+ Family: 6,
+ Model: 63,
+ ModelName: "Intel(R) Xeon(R) CPU",
+ Bugs: "cpu_meltdown spectre_v1 spectre_v2 spec_store_bypass l1tf mds swapgs",
+ PhysicalCores: 2,
+ Cores: 1,
+ ThreadsPerCore: 1,
+}
+
+// AMD8 is an eight core AMD machine.
+var AMD8 = CPU{
+ Name: "AMD",
+ VendorID: "AuthenticAMD",
+ Family: 23,
+ Model: 49,
+ ModelName: "AMD EPYC 7B12",
+ Bugs: "sysret_ss_attrs spectre_v1 spectre_v2 spec_store_bypass",
+ PhysicalCores: 4,
+ Cores: 1,
+ ThreadsPerCore: 2,
+}
+
+// MakeCPUString makes a string formatted like /proc/cpuinfo for a mock CPU.
+func (tc CPU) MakeCPUString() string {
+ template := `processor : %d
+vendor_id : %s
+cpu family : %d
+model : %d
+model name : %s
+physical id : %d
+core id : %d
+cpu cores : %d
+bugs : %s
+
+`
+
+ ret := ``
+ for i := 0; i < tc.PhysicalCores; i++ {
+ for j := 0; j < tc.Cores; j++ {
+ for k := 0; k < tc.ThreadsPerCore; k++ {
+ processorNum := (i*tc.Cores+j)*tc.ThreadsPerCore + k
+ ret += fmt.Sprintf(template,
+ processorNum, /*processor*/
+ tc.VendorID, /*vendor_id*/
+ tc.Family, /*cpu family*/
+ tc.Model, /*model*/
+ tc.ModelName, /*model name*/
+ i, /*physical id*/
+ j, /*core id*/
+ tc.Cores*tc.PhysicalCores, /*cpu cores*/
+ tc.Bugs, /*bugs*/
+ )
+ }
+ }
+ }
+ return ret
+}
+
+// MakeSysPossibleString makes a string representing the contents of /sys/devices/system/cpu/possible.
+func (tc CPU) MakeSysPossibleString() string {
+ max := tc.PhysicalCores * tc.Cores * tc.ThreadsPerCore
+ if max == 1 {
+ return "0"
+ }
+ return fmt.Sprintf("0-%d", max-1)
+}
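
The mocks above are meant to be plugged into the mitigate API; the hypothetical test sketch below (TestMockShutdownCount is not part of this change) shows the shape: CascadeLake4 describes two hyperthread pairs that all report mds, so exactly two siblings land on the shutdown list.

package mitigate_test

import (
	"testing"

	"gvisor.dev/gvisor/runsc/mitigate"
	"gvisor.dev/gvisor/runsc/mitigate/mock"
)

func TestMockShutdownCount(t *testing.T) {
	data := mock.CascadeLake4.MakeCPUString()
	set, err := mitigate.NewCPUSet([]byte(data), func(th mitigate.Thread) bool {
		return th.IsVulnerable()
	})
	if err != nil {
		t.Fatalf("NewCPUSet: %v", err)
	}
	// Two thread groups, each vulnerable, each losing one sibling.
	if got, want := len(set.GetShutdownList()), 2; got != want {
		t.Fatalf("shutdown list: got %d, want %d", got, want)
	}
}
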
diff --git a/runsc/sandbox/network.go b/runsc/sandbox/network.go
index 9e429f7d5..f69558021 100644
--- a/runsc/sandbox/network.go
+++ b/runsc/sandbox/network.go
@@ -21,7 +21,6 @@ import (
"path/filepath"
"runtime"
"strconv"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/vishvananda/netlink"
@@ -102,11 +101,11 @@ func joinNetNS(nsPath string) (func(), error) {
// isRootNS determines whether we are running in the root net namespace.
// /proc/sys/net/core/rmem_default only exists in root network namespace.
func isRootNS() (bool, error) {
- err := syscall.Access("/proc/sys/net/core/rmem_default", syscall.F_OK)
+ err := unix.Access("/proc/sys/net/core/rmem_default", unix.F_OK)
switch err {
case nil:
return true, nil
- case syscall.ENOENT:
+ case unix.ENOENT:
return false, nil
default:
return false, fmt.Errorf("failed to access /proc/sys/net/core/rmem_default: %v", err)
@@ -270,17 +269,17 @@ type socketEntry struct {
func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (*socketEntry, error) {
// Create the socket.
const protocol = 0x0300 // htons(ETH_P_ALL)
- fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)
+ fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, protocol)
if err != nil {
return nil, fmt.Errorf("unable to create raw socket: %v", err)
}
deviceFile := os.NewFile(uintptr(fd), "raw-device-fd")
// Bind to the appropriate device.
- ll := syscall.SockaddrLinklayer{
+ ll := unix.SockaddrLinklayer{
Protocol: protocol,
Ifindex: iface.Index,
}
- if err := syscall.Bind(fd, &ll); err != nil {
+ if err := unix.Bind(fd, &ll); err != nil {
return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err)
}
@@ -291,7 +290,7 @@ func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (
return nil, fmt.Errorf("getting GSO for interface %q: %v", iface.Name, err)
}
if gso {
- if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
+ if err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err)
}
gsoMaxSize = ifaceLink.Attrs().GSOMaxSize
@@ -307,18 +306,18 @@ func createSocket(iface net.Interface, ifaceLink netlink.Link, enableGSO bool) (
// incurring packet drops.
const bufSize = 4 << 20 // 4MB.
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUFFORCE, bufSize); err != nil {
- syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bufSize)
- sz, _ := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF)
+ if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bufSize); err != nil {
+ unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize)
+ sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF)
if sz < bufSize {
log.Warningf("Failed to increase rcv buffer to %d on SOCK_RAW on %s. Current buffer %d: %v", bufSize, iface.Name, sz, err)
}
}
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUFFORCE, bufSize); err != nil {
- syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bufSize)
- sz, _ := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF)
+ if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, bufSize); err != nil {
+ unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize)
+ sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF)
if sz < bufSize {
log.Warningf("Failed to increase snd buffer to %d on SOCK_RAW on %s. Curent buffer %d: %v", bufSize, iface.Name, sz, err)
}
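
The two blocks above use the same force-then-fallback pattern for the receive and send buffers; below is a standalone sketch of that pattern with golang.org/x/sys/unix. It opens a throwaway UDP socket instead of the AF_PACKET socket, since the latter needs CAP_NET_RAW; the 4MB figure matches the constant above.

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		log.Fatalf("socket: %v", err)
	}
	defer unix.Close(fd)

	const bufSize = 4 << 20 // 4MB.
	// SO_RCVBUFFORCE ignores rmem_max but needs CAP_NET_ADMIN; fall back to
	// SO_RCVBUF (clamped by the kernel) when the forced set fails.
	if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, bufSize); err != nil {
		unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize)
	}
	sz, _ := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF)
	fmt.Printf("effective rcv buffer: %d bytes\n", sz)
}
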
diff --git a/runsc/sandbox/network_unsafe.go b/runsc/sandbox/network_unsafe.go
index 2a2a0fb7e..1b808a8a0 100644
--- a/runsc/sandbox/network_unsafe.go
+++ b/runsc/sandbox/network_unsafe.go
@@ -15,7 +15,6 @@
package sandbox
import (
- "syscall"
"unsafe"
"golang.org/x/sys/unix"
@@ -48,7 +47,7 @@ func isGSOEnabled(fd int, intf string) (bool, error) {
ifrData: &val,
}
- if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(fd), unix.SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))); err != 0 {
+ if _, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), unix.SIOCETHTOOL, uintptr(unsafe.Pointer(&ifr))); err != 0 {
return false, err
}
diff --git a/runsc/sandbox/sandbox.go b/runsc/sandbox/sandbox.go
index 7fe65c7ba..450f92645 100644
--- a/runsc/sandbox/sandbox.go
+++ b/runsc/sandbox/sandbox.go
@@ -30,6 +30,7 @@ import (
"github.com/cenkalti/backoff"
specs "github.com/opencontainers/runtime-spec/specs-go"
"github.com/syndtr/gocapability/capability"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/cleanup"
"gvisor.dev/gvisor/pkg/control/client"
"gvisor.dev/gvisor/pkg/control/server"
@@ -83,7 +84,7 @@ type Sandbox struct {
// child==true and the sandbox was waited on. This field allows for multiple
// threads to wait on sandbox and get the exit code, since Linux will return
// WaitStatus to one of the waiters only.
- status syscall.WaitStatus
+ status unix.WaitStatus
}
// Args is used to configure a new sandbox.
@@ -383,7 +384,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
binPath := specutils.ExePath
cmd := exec.Command(binPath, conf.ToFlags()...)
- cmd.SysProcAttr = &syscall.SysProcAttr{}
+ cmd.SysProcAttr = &unix.SysProcAttr{}
// Open the log files to pass to the sandbox as FDs.
//
@@ -739,7 +740,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
if args.Attached {
// Kill sandbox if parent process exits in attached mode.
- cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL
+ cmd.SysProcAttr.Pdeathsig = unix.SIGKILL
// Tells boot that any process it creates must have pdeathsig set.
cmd.Args = append(cmd.Args, "--attached")
}
@@ -762,7 +763,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
//
// NOTE: The error message is checked because error types are lost over
// rpc calls.
- if strings.Contains(err.Error(), syscall.EACCES.Error()) {
+ if strings.Contains(err.Error(), unix.EACCES.Error()) {
if permsErr := checkBinaryPermissions(conf); permsErr != nil {
return fmt.Errorf("%v: %v", err, permsErr)
}
@@ -782,7 +783,7 @@ func (s *Sandbox) createSandboxProcess(conf *config.Config, args *Args, startSyn
}
// Wait waits for the containerized process to exit, and returns its WaitStatus.
-func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
+func (s *Sandbox) Wait(cid string) (unix.WaitStatus, error) {
log.Debugf("Waiting for container %q in sandbox %q", cid, s.ID)
if conn, err := s.sandboxConnect(); err != nil {
@@ -790,14 +791,14 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
// There is nothing we can do for subcontainers. For the init container, we
// can try to get the sandbox exit code.
if !s.IsRootContainer(cid) {
- return syscall.WaitStatus(0), err
+ return unix.WaitStatus(0), err
}
log.Warningf("Wait on container %q failed: %v. Will try waiting on the sandbox process instead.", cid, err)
} else {
defer conn.Close()
// Try the Wait RPC to the sandbox.
- var ws syscall.WaitStatus
+ var ws unix.WaitStatus
err = conn.Call(boot.ContainerWait, &cid, &ws)
if err == nil {
// It worked!
@@ -805,7 +806,7 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
}
// See comment above.
if !s.IsRootContainer(cid) {
- return syscall.WaitStatus(0), err
+ return unix.WaitStatus(0), err
}
// The sandbox may have exited after we connected, but before
@@ -817,10 +818,10 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
// The best we can do is ask Linux what the sandbox exit status was, since in
// most cases that will be the same as the container exit status.
if err := s.waitForStopped(); err != nil {
- return syscall.WaitStatus(0), err
+ return unix.WaitStatus(0), err
}
if !s.child {
- return syscall.WaitStatus(0), fmt.Errorf("sandbox no longer running and its exit status is unavailable")
+ return unix.WaitStatus(0), fmt.Errorf("sandbox no longer running and its exit status is unavailable")
}
s.statusMu.Lock()
@@ -830,9 +831,9 @@ func (s *Sandbox) Wait(cid string) (syscall.WaitStatus, error) {
// WaitPID waits for process 'pid' in the container's sandbox and returns its
// WaitStatus.
-func (s *Sandbox) WaitPID(cid string, pid int32) (syscall.WaitStatus, error) {
+func (s *Sandbox) WaitPID(cid string, pid int32) (unix.WaitStatus, error) {
log.Debugf("Waiting for PID %d in sandbox %q", pid, s.ID)
- var ws syscall.WaitStatus
+ var ws unix.WaitStatus
conn, err := s.sandboxConnect()
if err != nil {
return ws, err
@@ -861,7 +862,7 @@ func (s *Sandbox) destroy() error {
log.Debugf("Destroy sandbox %q", s.ID)
if s.Pid != 0 {
log.Debugf("Killing sandbox %q", s.ID)
- if err := syscall.Kill(s.Pid, syscall.SIGKILL); err != nil && err != syscall.ESRCH {
+ if err := unix.Kill(s.Pid, unix.SIGKILL); err != nil && err != unix.ESRCH {
return fmt.Errorf("killing sandbox %q PID %q: %v", s.ID, s.Pid, err)
}
if err := s.waitForStopped(); err != nil {
@@ -875,7 +876,7 @@ func (s *Sandbox) destroy() error {
// SignalContainer sends the signal to a container in the sandbox. If all is
// true and signal is SIGKILL, then waits for all processes to exit before
// returning.
-func (s *Sandbox) SignalContainer(cid string, sig syscall.Signal, all bool) error {
+func (s *Sandbox) SignalContainer(cid string, sig unix.Signal, all bool) error {
log.Debugf("Signal sandbox %q", s.ID)
conn, err := s.sandboxConnect()
if err != nil {
@@ -903,7 +904,7 @@ func (s *Sandbox) SignalContainer(cid string, sig syscall.Signal, all bool) erro
// fgProcess is true, then the signal is sent to the foreground process group
// in the same session that PID belongs to. This is only valid if the process
// is attached to a host TTY.
-func (s *Sandbox) SignalProcess(cid string, pid int32, sig syscall.Signal, fgProcess bool) error {
+func (s *Sandbox) SignalProcess(cid string, pid int32, sig unix.Signal, fgProcess bool) error {
log.Debugf("Signal sandbox %q", s.ID)
conn, err := s.sandboxConnect()
if err != nil {
@@ -984,7 +985,7 @@ func (s *Sandbox) Resume(cid string) error {
func (s *Sandbox) IsRunning() bool {
if s.Pid != 0 {
// Send a signal 0 to the sandbox process.
- if err := syscall.Kill(s.Pid, 0); err == nil {
+ if err := unix.Kill(s.Pid, 0); err == nil {
// Succeeded, process is running.
return true
}
@@ -1147,7 +1148,7 @@ func (s *Sandbox) waitForStopped() error {
}
// The sandbox process is a child of the current process,
// so we can wait it and collect its zombie.
- wpid, err := syscall.Wait4(int(s.Pid), &s.status, syscall.WNOHANG, nil)
+ wpid, err := unix.Wait4(int(s.Pid), &s.status, unix.WNOHANG, nil)
if err != nil {
return fmt.Errorf("error waiting the sandbox process: %v", err)
}
diff --git a/runsc/specutils/fs.go b/runsc/specutils/fs.go
index 138aa4dd1..b62504a8c 100644
--- a/runsc/specutils/fs.go
+++ b/runsc/specutils/fs.go
@@ -18,9 +18,9 @@ import (
"fmt"
"math/bits"
"path"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
)
type mapping struct {
@@ -31,48 +31,48 @@ type mapping struct {
// optionsMap maps mount propagation-related OCI filesystem options to mount(2)
// syscall flags.
var optionsMap = map[string]mapping{
- "acl": {set: true, val: syscall.MS_POSIXACL},
- "async": {set: false, val: syscall.MS_SYNCHRONOUS},
- "atime": {set: false, val: syscall.MS_NOATIME},
- "bind": {set: true, val: syscall.MS_BIND},
+ "acl": {set: true, val: unix.MS_POSIXACL},
+ "async": {set: false, val: unix.MS_SYNCHRONOUS},
+ "atime": {set: false, val: unix.MS_NOATIME},
+ "bind": {set: true, val: unix.MS_BIND},
"defaults": {set: true, val: 0},
- "dev": {set: false, val: syscall.MS_NODEV},
- "diratime": {set: false, val: syscall.MS_NODIRATIME},
- "dirsync": {set: true, val: syscall.MS_DIRSYNC},
- "exec": {set: false, val: syscall.MS_NOEXEC},
- "noexec": {set: true, val: syscall.MS_NOEXEC},
- "iversion": {set: true, val: syscall.MS_I_VERSION},
- "loud": {set: false, val: syscall.MS_SILENT},
- "mand": {set: true, val: syscall.MS_MANDLOCK},
- "noacl": {set: false, val: syscall.MS_POSIXACL},
- "noatime": {set: true, val: syscall.MS_NOATIME},
- "nodev": {set: true, val: syscall.MS_NODEV},
- "nodiratime": {set: true, val: syscall.MS_NODIRATIME},
- "noiversion": {set: false, val: syscall.MS_I_VERSION},
- "nomand": {set: false, val: syscall.MS_MANDLOCK},
- "norelatime": {set: false, val: syscall.MS_RELATIME},
- "nostrictatime": {set: false, val: syscall.MS_STRICTATIME},
- "nosuid": {set: true, val: syscall.MS_NOSUID},
- "rbind": {set: true, val: syscall.MS_BIND | syscall.MS_REC},
- "relatime": {set: true, val: syscall.MS_RELATIME},
- "remount": {set: true, val: syscall.MS_REMOUNT},
- "ro": {set: true, val: syscall.MS_RDONLY},
- "rw": {set: false, val: syscall.MS_RDONLY},
- "silent": {set: true, val: syscall.MS_SILENT},
- "strictatime": {set: true, val: syscall.MS_STRICTATIME},
- "suid": {set: false, val: syscall.MS_NOSUID},
- "sync": {set: true, val: syscall.MS_SYNCHRONOUS},
+ "dev": {set: false, val: unix.MS_NODEV},
+ "diratime": {set: false, val: unix.MS_NODIRATIME},
+ "dirsync": {set: true, val: unix.MS_DIRSYNC},
+ "exec": {set: false, val: unix.MS_NOEXEC},
+ "noexec": {set: true, val: unix.MS_NOEXEC},
+ "iversion": {set: true, val: unix.MS_I_VERSION},
+ "loud": {set: false, val: unix.MS_SILENT},
+ "mand": {set: true, val: unix.MS_MANDLOCK},
+ "noacl": {set: false, val: unix.MS_POSIXACL},
+ "noatime": {set: true, val: unix.MS_NOATIME},
+ "nodev": {set: true, val: unix.MS_NODEV},
+ "nodiratime": {set: true, val: unix.MS_NODIRATIME},
+ "noiversion": {set: false, val: unix.MS_I_VERSION},
+ "nomand": {set: false, val: unix.MS_MANDLOCK},
+ "norelatime": {set: false, val: unix.MS_RELATIME},
+ "nostrictatime": {set: false, val: unix.MS_STRICTATIME},
+ "nosuid": {set: true, val: unix.MS_NOSUID},
+ "rbind": {set: true, val: unix.MS_BIND | unix.MS_REC},
+ "relatime": {set: true, val: unix.MS_RELATIME},
+ "remount": {set: true, val: unix.MS_REMOUNT},
+ "ro": {set: true, val: unix.MS_RDONLY},
+ "rw": {set: false, val: unix.MS_RDONLY},
+ "silent": {set: true, val: unix.MS_SILENT},
+ "strictatime": {set: true, val: unix.MS_STRICTATIME},
+ "suid": {set: false, val: unix.MS_NOSUID},
+ "sync": {set: true, val: unix.MS_SYNCHRONOUS},
}
// propOptionsMap is similar to optionsMap, but it lists propagation options
// that cannot be used together with other flags.
var propOptionsMap = map[string]mapping{
- "private": {set: true, val: syscall.MS_PRIVATE},
- "rprivate": {set: true, val: syscall.MS_PRIVATE | syscall.MS_REC},
- "slave": {set: true, val: syscall.MS_SLAVE},
- "rslave": {set: true, val: syscall.MS_SLAVE | syscall.MS_REC},
- "unbindable": {set: true, val: syscall.MS_UNBINDABLE},
- "runbindable": {set: true, val: syscall.MS_UNBINDABLE | syscall.MS_REC},
+ "private": {set: true, val: unix.MS_PRIVATE},
+ "rprivate": {set: true, val: unix.MS_PRIVATE | unix.MS_REC},
+ "slave": {set: true, val: unix.MS_SLAVE},
+ "rslave": {set: true, val: unix.MS_SLAVE | unix.MS_REC},
+ "unbindable": {set: true, val: unix.MS_UNBINDABLE},
+ "runbindable": {set: true, val: unix.MS_UNBINDABLE | unix.MS_REC},
}
// invalidOptions list options not allowed.
@@ -139,7 +139,7 @@ func ValidateMountOptions(opts []string) error {
// correct.
func validateRootfsPropagation(opt string) error {
flags := PropOptionsToFlags([]string{opt})
- if flags&(syscall.MS_SLAVE|syscall.MS_PRIVATE) == 0 {
+ if flags&(unix.MS_SLAVE|unix.MS_PRIVATE) == 0 {
return fmt.Errorf("root mount propagation option must specify private or slave: %q", opt)
}
return validatePropagation(opt)
@@ -147,7 +147,7 @@ func validateRootfsPropagation(opt string) error {
func validatePropagation(opt string) error {
flags := PropOptionsToFlags([]string{opt})
- exclusive := flags & (syscall.MS_SLAVE | syscall.MS_PRIVATE | syscall.MS_SHARED | syscall.MS_UNBINDABLE)
+ exclusive := flags & (unix.MS_SLAVE | unix.MS_PRIVATE | unix.MS_SHARED | unix.MS_UNBINDABLE)
if bits.OnesCount32(exclusive) > 1 {
return fmt.Errorf("mount propagation options are mutually exclusive: %q", opt)
}
diff --git a/runsc/specutils/namespace.go b/runsc/specutils/namespace.go
index 23001d67c..69d7ba5c4 100644
--- a/runsc/specutils/namespace.go
+++ b/runsc/specutils/namespace.go
@@ -109,7 +109,7 @@ func FilterNS(filter []specs.LinuxNamespaceType, s *specs.Spec) []specs.LinuxNam
// setNS sets the namespace of the given type. It must be called with
// OSThreadLocked.
func setNS(fd, nsType uintptr) error {
- if _, _, err := syscall.RawSyscall(unix.SYS_SETNS, fd, nsType, 0); err != 0 {
+ if _, _, err := unix.RawSyscall(unix.SYS_SETNS, fd, nsType, 0); err != 0 {
return err
}
return nil
@@ -158,7 +158,7 @@ func StartInNS(cmd *exec.Cmd, nss []specs.LinuxNamespace) error {
defer runtime.UnlockOSThread()
if cmd.SysProcAttr == nil {
- cmd.SysProcAttr = &syscall.SysProcAttr{}
+ cmd.SysProcAttr = &unix.SysProcAttr{}
}
for _, ns := range nss {
@@ -185,7 +185,7 @@ func SetUIDGIDMappings(cmd *exec.Cmd, s *specs.Spec) {
return
}
if cmd.SysProcAttr == nil {
- cmd.SysProcAttr = &syscall.SysProcAttr{}
+ cmd.SysProcAttr = &unix.SysProcAttr{}
}
for _, idMap := range s.Linux.UIDMappings {
log.Infof("Mapping host uid %d to container uid %d (size=%d)", idMap.HostID, idMap.ContainerID, idMap.Size)
@@ -241,8 +241,8 @@ func MaybeRunAsRoot() error {
cmd := exec.Command("/proc/self/exe", os.Args[1:]...)
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Cloneflags: syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS,
+ cmd.SysProcAttr = &unix.SysProcAttr{
+ Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNS,
// Set current user/group as root inside the namespace. Since we may not
// have CAP_SETUID/CAP_SETGID, just map root to the current user/group.
UidMappings: []syscall.SysProcIDMap{
@@ -255,7 +255,7 @@ func MaybeRunAsRoot() error {
GidMappingsEnableSetgroups: false,
// Make sure child is killed when the parent terminates.
- Pdeathsig: syscall.SIGKILL,
+ Pdeathsig: unix.SIGKILL,
}
cmd.Env = os.Environ()
diff --git a/runsc/specutils/seccomp/BUILD b/runsc/specutils/seccomp/BUILD
index 3520f2d6d..e9e647d82 100644
--- a/runsc/specutils/seccomp/BUILD
+++ b/runsc/specutils/seccomp/BUILD
@@ -18,6 +18,7 @@ go_library(
"//pkg/sentry/kernel",
"//pkg/sentry/syscalls/linux",
"@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
@@ -30,5 +31,6 @@ go_test(
"//pkg/binary",
"//pkg/bpf",
"@com_github_opencontainers_runtime_spec//specs-go:go_default_library",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/runsc/specutils/seccomp/seccomp.go b/runsc/specutils/seccomp/seccomp.go
index 5932f7a41..0ef7a4d54 100644
--- a/runsc/specutils/seccomp/seccomp.go
+++ b/runsc/specutils/seccomp/seccomp.go
@@ -18,9 +18,9 @@ package seccomp
import (
"fmt"
- "syscall"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bpf"
"gvisor.dev/gvisor/pkg/log"
@@ -33,9 +33,9 @@ var (
killThreadAction = linux.SECCOMP_RET_KILL_THREAD
trapAction = linux.SECCOMP_RET_TRAP
// runc always returns EPERM as the errorcode for SECCOMP_RET_ERRNO
- errnoAction = linux.SECCOMP_RET_ERRNO.WithReturnCode(uint16(syscall.EPERM))
+ errnoAction = linux.SECCOMP_RET_ERRNO.WithReturnCode(uint16(unix.EPERM))
// runc always returns EPERM as the errorcode for SECCOMP_RET_TRACE
- traceAction = linux.SECCOMP_RET_TRACE.WithReturnCode(uint16(syscall.EPERM))
+ traceAction = linux.SECCOMP_RET_TRACE.WithReturnCode(uint16(unix.EPERM))
allowAction = linux.SECCOMP_RET_ALLOW
)
diff --git a/runsc/specutils/seccomp/seccomp_test.go b/runsc/specutils/seccomp/seccomp_test.go
index 850c237ba..11a6c8daa 100644
--- a/runsc/specutils/seccomp/seccomp_test.go
+++ b/runsc/specutils/seccomp/seccomp_test.go
@@ -16,10 +16,10 @@ package seccomp
import (
"fmt"
- "syscall"
"testing"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/bpf"
)
@@ -184,7 +184,7 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 0,
- Value: syscall.CLONE_FS,
+ Value: unix.CLONE_FS,
Op: specs.OpEqualTo,
},
},
@@ -192,7 +192,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{syscall.CLONE_FS}),
+ input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
expected: uint32(errnoAction),
},
{
@@ -207,12 +207,12 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 0,
- Value: syscall.CLONE_FS,
+ Value: unix.CLONE_FS,
Op: specs.OpEqualTo,
},
{
Index: 0,
- Value: syscall.CLONE_VM,
+ Value: unix.CLONE_VM,
Op: specs.OpEqualTo,
},
},
@@ -220,7 +220,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{syscall.CLONE_FS}),
+ input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
expected: uint32(errnoAction),
},
{
@@ -235,12 +235,12 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 1,
- Value: syscall.SOL_SOCKET,
+ Value: unix.SOL_SOCKET,
Op: specs.OpEqualTo,
},
{
Index: 2,
- Value: syscall.SO_PEERCRED,
+ Value: unix.SO_PEERCRED,
Op: specs.OpEqualTo,
},
},
@@ -248,7 +248,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, syscall.SOL_SOCKET, syscall.SO_PEERCRED}),
+ input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, unix.SOL_SOCKET, unix.SO_PEERCRED}),
expected: uint32(errnoAction),
},
{
@@ -263,12 +263,12 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 1,
- Value: syscall.SOL_SOCKET,
+ Value: unix.SOL_SOCKET,
Op: specs.OpEqualTo,
},
{
Index: 2,
- Value: syscall.SO_PEERCRED,
+ Value: unix.SO_PEERCRED,
Op: specs.OpEqualTo,
},
},
@@ -276,7 +276,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, syscall.SOL_SOCKET}),
+ input: testInput(nativeArchAuditNo, "getsockopt", &[6]uint64{0, unix.SOL_SOCKET}),
expected: uint32(allowAction),
},
{
@@ -291,7 +291,7 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 0,
- Value: syscall.CLONE_FS,
+ Value: unix.CLONE_FS,
Op: specs.OpEqualTo,
},
},
@@ -299,7 +299,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{syscall.CLONE_VM}),
+ input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_VM}),
expected: uint32(allowAction),
},
{
@@ -314,8 +314,8 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 0,
- Value: syscall.CLONE_FS,
- ValueTwo: syscall.CLONE_FS,
+ Value: unix.CLONE_FS,
+ ValueTwo: unix.CLONE_FS,
Op: specs.OpMaskedEqual,
},
},
@@ -323,7 +323,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{syscall.CLONE_FS | syscall.CLONE_VM}),
+ input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS | unix.CLONE_VM}),
expected: uint32(errnoAction),
},
{
@@ -338,8 +338,8 @@ var (
Args: []specs.LinuxSeccompArg{
{
Index: 0,
- Value: syscall.CLONE_FS | syscall.CLONE_VM,
- ValueTwo: syscall.CLONE_FS | syscall.CLONE_VM,
+ Value: unix.CLONE_FS | unix.CLONE_VM,
+ ValueTwo: unix.CLONE_FS | unix.CLONE_VM,
Op: specs.OpMaskedEqual,
},
},
@@ -347,7 +347,7 @@ var (
},
},
},
- input: testInput(nativeArchAuditNo, "clone", &[6]uint64{syscall.CLONE_FS}),
+ input: testInput(nativeArchAuditNo, "clone", &[6]uint64{unix.CLONE_FS}),
expected: uint32(allowAction),
},
{
diff --git a/runsc/specutils/specutils.go b/runsc/specutils/specutils.go
index ea55bbc7d..5ba38bfe4 100644
--- a/runsc/specutils/specutils.go
+++ b/runsc/specutils/specutils.go
@@ -26,12 +26,12 @@ import (
"path/filepath"
"strconv"
"strings"
- "syscall"
"time"
"github.com/cenkalti/backoff"
"github.com/mohae/deepcopy"
specs "github.com/opencontainers/runtime-spec/specs-go"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/abi/linux"
"gvisor.dev/gvisor/pkg/bits"
"gvisor.dev/gvisor/pkg/log"
@@ -375,9 +375,9 @@ func WaitForReady(pid int, timeout time.Duration, ready func() (bool, error)) er
// Check if the process is still running.
// If the process is alive, child is 0 because of the NOHANG option.
// If the process has terminated, child equals the process id.
- var ws syscall.WaitStatus
- var ru syscall.Rusage
- child, err := syscall.Wait4(pid, &ws, syscall.WNOHANG, &ru)
+ var ws unix.WaitStatus
+ var ru unix.Rusage
+ child, err := unix.Wait4(pid, &ws, unix.WNOHANG, &ru)
if err != nil {
return backoff.Permanent(fmt.Errorf("error waiting for process: %v", err))
} else if child == pid {
@@ -437,7 +437,7 @@ func Mount(src, dst, typ string, flags uint32) error {
return fmt.Errorf("mkdir(%q) failed: %v", parent, err)
}
// Create the destination file if it does not exist.
- f, err := os.OpenFile(dst, syscall.O_CREAT, 0777)
+ f, err := os.OpenFile(dst, unix.O_CREAT, 0777)
if err != nil {
return fmt.Errorf("open(%q) failed: %v", dst, err)
}
@@ -445,7 +445,7 @@ func Mount(src, dst, typ string, flags uint32) error {
}
// Do the mount.
- if err := syscall.Mount(src, dst, typ, uintptr(flags), ""); err != nil {
+ if err := unix.Mount(src, dst, typ, uintptr(flags), ""); err != nil {
return fmt.Errorf("mount(%q, %q, %d) failed: %v", src, dst, flags, err)
}
return nil
@@ -466,7 +466,7 @@ func ContainsStr(strs []string, str string) bool {
func RetryEintr(f func() (uintptr, uintptr, error)) (uintptr, uintptr, error) {
for {
r1, r2, err := f()
- if err != syscall.EINTR {
+ if err != unix.EINTR {
return r1, r2, err
}
}
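
RetryEintr simply re-invokes the wrapped raw call while it reports EINTR. A self-contained sketch of the same pattern, assuming a Linux host; the non-blocking Wait4 call is illustrative only, not part of the change:

	package main

	import (
		"fmt"

		"golang.org/x/sys/unix"
	)

	// retryEINTR mirrors specutils.RetryEintr: call f again while it is interrupted.
	func retryEINTR(f func() (uintptr, uintptr, error)) (uintptr, uintptr, error) {
		for {
			r1, r2, err := f()
			if err != unix.EINTR {
				return r1, r2, err
			}
		}
	}

	func main() {
		// Illustrative only: reap any child without blocking, retrying on EINTR.
		var ws unix.WaitStatus
		pid, _, err := retryEINTR(func() (uintptr, uintptr, error) {
			p, err := unix.Wait4(-1, &ws, unix.WNOHANG, nil)
			return uintptr(p), 0, err
		})
		fmt.Println(pid, err)
	}
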
diff --git a/test/benchmarks/fs/BUILD b/test/benchmarks/fs/BUILD
index b4f967441..c94caab60 100644
--- a/test/benchmarks/fs/BUILD
+++ b/test/benchmarks/fs/BUILD
@@ -11,6 +11,7 @@ benchmark_test(
"//pkg/test/dockerutil",
"//test/benchmarks/harness",
"//test/benchmarks/tools",
+ "@com_github_docker_docker//api/types/mount:go_default_library",
],
)
diff --git a/test/benchmarks/fs/bazel_test.go b/test/benchmarks/fs/bazel_test.go
index 8baeff0db..7ced963f6 100644
--- a/test/benchmarks/fs/bazel_test.go
+++ b/test/benchmarks/fs/bazel_test.go
@@ -25,6 +25,13 @@ import (
"gvisor.dev/gvisor/test/benchmarks/tools"
)
+// Dimensions here are clean/dirty cache (do or don't drop caches)
+// and the filesystem (bind, tmpfs, or rootfs) on which we are compiling.
+type benchmark struct {
+ clearCache bool // clearCache drops caches before running.
+ fstype string // type of filesystem to use.
+}
+
// Note: CleanCache versions of this test require running with root permissions.
func BenchmarkBuildABSL(b *testing.B) {
runBuildBenchmark(b, "benchmarks/absl", "/abseil-cpp", "absl/base/...")
@@ -45,17 +52,18 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
}
defer machine.CleanUp()
- // Dimensions here are clean/dirty cache (do or don't drop caches)
- // and if the mount on which we are compiling is a tmpfs/bind mount.
- benchmarks := []struct {
- clearCache bool // clearCache drops caches before running.
- tmpfs bool // tmpfs will run compilation on a tmpfs.
- }{
- {clearCache: true, tmpfs: false},
- {clearCache: false, tmpfs: false},
- {clearCache: true, tmpfs: true},
- {clearCache: false, tmpfs: true},
+ benchmarks := make([]benchmark, 0, 6)
+ for _, filesys := range []string{harness.BindFS, harness.TmpFS, harness.RootFS} {
+ benchmarks = append(benchmarks, benchmark{
+ clearCache: true,
+ fstype: filesys,
+ })
+ benchmarks = append(benchmarks, benchmark{
+ clearCache: false,
+ fstype: filesys,
+ })
}
+
for _, bm := range benchmarks {
pageCache := tools.Parameter{
Name: "page_cache",
@@ -67,10 +75,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
filesystem := tools.Parameter{
Name: "filesystem",
- Value: "bind",
- }
- if bm.tmpfs {
- filesystem.Value = "tmpfs"
+ Value: bm.fstype,
}
name, err := tools.ParametersToName(pageCache, filesystem)
if err != nil {
@@ -83,21 +88,25 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
container := machine.GetContainer(ctx, b)
defer container.CleanUp(ctx)
+ mts, prefix, cleanup, err := harness.MakeMount(machine, bm.fstype)
+ if err != nil {
+ b.Fatalf("Failed to make mount: %v", err)
+ }
+ defer cleanup()
+
+ runOpts := dockerutil.RunOpts{
+ Image: image,
+ Mounts: mts,
+ }
+
// Start a container and sleep.
- if err := container.Spawn(ctx, dockerutil.RunOpts{
- Image: image,
- }, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
+ if err := container.Spawn(ctx, runOpts, "sleep", fmt.Sprintf("%d", 1000000)); err != nil {
b.Fatalf("run failed with: %v", err)
}
- // If we are running on a tmpfs, copy to /tmp which is a tmpfs.
- prefix := ""
- if bm.tmpfs {
- if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
- "cp", "-r", workdir, "/tmp/."); err != nil {
- b.Fatalf("failed to copy directory: %v (%s)", err, out)
- }
- prefix = "/tmp"
+ if out, err := container.Exec(ctx, dockerutil.ExecOpts{},
+ "cp", "-rf", workdir, prefix+"/."); err != nil {
+ b.Fatalf("failed to copy directory: %v (%s)", err, out)
}
b.ResetTimer()
@@ -118,7 +127,7 @@ func runBuildBenchmark(b *testing.B, image, workdir, target string) {
WorkDir: prefix + workdir,
}, "bazel", "build", "-c", "opt", target)
if err != nil {
- b.Fatalf("build failed with: %v", err)
+ b.Fatalf("build failed with: %v logs: %s", err, got)
}
b.StopTimer()
diff --git a/test/benchmarks/fs/fio_test.go b/test/benchmarks/fs/fio_test.go
index cc2d1cbbc..f783a2b33 100644
--- a/test/benchmarks/fs/fio_test.go
+++ b/test/benchmarks/fs/fio_test.go
@@ -21,7 +21,6 @@ import (
"strings"
"testing"
- "github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/test/benchmarks/harness"
"gvisor.dev/gvisor/test/benchmarks/tools"
@@ -70,7 +69,7 @@ func BenchmarkFio(b *testing.B) {
}
defer machine.CleanUp()
- for _, fsType := range []mount.Type{mount.TypeBind, mount.TypeTmpfs} {
+ for _, fsType := range []string{harness.BindFS, harness.TmpFS, harness.RootFS} {
for _, tc := range testCases {
operation := tools.Parameter{
Name: "operation",
@@ -82,7 +81,7 @@ func BenchmarkFio(b *testing.B) {
}
filesystem := tools.Parameter{
Name: "filesystem",
- Value: string(fsType),
+ Value: fsType,
}
name, err := tools.ParametersToName(operation, blockSize, filesystem)
if err != nil {
@@ -95,13 +94,7 @@ func BenchmarkFio(b *testing.B) {
container := machine.GetContainer(ctx, b)
defer container.CleanUp(ctx)
- // Directory and filename inside container where fio will read/write.
- outdir := "/data"
- outfile := filepath.Join(outdir, "test.txt")
-
- // Make the required mount and grab a cleanup for bind mounts
- // as they are backed by a temp directory (mktemp).
- mnt, mountCleanup, err := makeMount(machine, fsType, outdir)
+ mnts, outdir, mountCleanup, err := harness.MakeMount(machine, fsType)
if err != nil {
b.Fatalf("failed to make mount: %v", err)
}
@@ -109,12 +102,9 @@ func BenchmarkFio(b *testing.B) {
// Start the container with the mount.
if err := container.Spawn(
- ctx,
- dockerutil.RunOpts{
- Image: "benchmarks/fio",
- Mounts: []mount.Mount{
- mnt,
- },
+ ctx, dockerutil.RunOpts{
+ Image: "benchmarks/fio",
+ Mounts: mnts,
},
// Sleep on the order of b.N.
"sleep", fmt.Sprintf("%d", 1000*b.N),
@@ -122,6 +112,9 @@ func BenchmarkFio(b *testing.B) {
b.Fatalf("failed to start fio container with: %v", err)
}
+ // Directory and filename inside container where fio will read/write.
+ outfile := filepath.Join(outdir, "test.txt")
+
// For reads, we need a file to read so make one inside the container.
if strings.Contains(tc.Test, "read") {
fallocateCmd := fmt.Sprintf("fallocate -l %dK %s", tc.Size, outfile)
@@ -135,6 +128,7 @@ func BenchmarkFio(b *testing.B) {
if err := harness.DropCaches(machine); err != nil {
b.Skipf("failed to drop caches with %v. You probably need root.", err)
}
+
cmd := tc.MakeCmd(outfile)
if err := harness.DropCaches(machine); err != nil {
@@ -154,39 +148,6 @@ func BenchmarkFio(b *testing.B) {
}
}
-// makeMount makes a mount and cleanup based on the requested type. Bind
-// and volume mounts are backed by a temp directory made with mktemp.
-// tmpfs mounts require no such backing and are just made.
-// It is up to the caller to call the returned cleanup.
-func makeMount(machine harness.Machine, mountType mount.Type, target string) (mount.Mount, func(), error) {
- switch mountType {
- case mount.TypeVolume, mount.TypeBind:
- dir, err := machine.RunCommand("mktemp", "-d")
- if err != nil {
- return mount.Mount{}, func() {}, fmt.Errorf("failed to create tempdir: %v", err)
- }
- dir = strings.TrimSuffix(dir, "\n")
-
- out, err := machine.RunCommand("chmod", "777", dir)
- if err != nil {
- machine.RunCommand("rm", "-rf", dir)
- return mount.Mount{}, func() {}, fmt.Errorf("failed modify directory: %v %s", err, out)
- }
- return mount.Mount{
- Target: target,
- Source: dir,
- Type: mount.TypeBind,
- }, func() { machine.RunCommand("rm", "-rf", dir) }, nil
- case mount.TypeTmpfs:
- return mount.Mount{
- Target: target,
- Type: mount.TypeTmpfs,
- }, func() {}, nil
- default:
- return mount.Mount{}, func() {}, fmt.Errorf("illegal mount time not supported: %v", mountType)
- }
-}
-
// TestMain is the main method for package fs.
func TestMain(m *testing.M) {
harness.Init()
diff --git a/test/benchmarks/harness/BUILD b/test/benchmarks/harness/BUILD
index c2e316709..116610938 100644
--- a/test/benchmarks/harness/BUILD
+++ b/test/benchmarks/harness/BUILD
@@ -14,5 +14,6 @@ go_library(
deps = [
"//pkg/test/dockerutil",
"//pkg/test/testutil",
+ "@com_github_docker_docker//api/types/mount:go_default_library",
],
)
diff --git a/test/benchmarks/harness/util.go b/test/benchmarks/harness/util.go
index aeac7ebff..36abe1069 100644
--- a/test/benchmarks/harness/util.go
+++ b/test/benchmarks/harness/util.go
@@ -18,8 +18,10 @@ import (
"context"
"fmt"
"net"
+ "strings"
"testing"
+ "github.com/docker/docker/api/types/mount"
"gvisor.dev/gvisor/pkg/test/dockerutil"
"gvisor.dev/gvisor/pkg/test/testutil"
)
@@ -55,3 +57,53 @@ func DebugLog(b *testing.B, msg string, args ...interface{}) {
b.Logf(msg, args...)
}
}
+
+const (
+ // BindFS indicates a bind mount should be created.
+ BindFS = "bindfs"
+ // TmpFS indicates a tmpfs mount should be created.
+ TmpFS = "tmpfs"
+ // RootFS indicates no mount should be created and the root mount should be used.
+ RootFS = "rootfs"
+)
+
+// MakeMount makes a mount and cleanup based on the requested type. Bind
+// and volume mounts are backed by a temp directory made with mktemp.
+// tmpfs mounts require no such backing and are just made.
+// rootfs mounts do not make a mount, but instead return a target directory at root.
+// It is up to the caller to call the returned cleanup.
+func MakeMount(machine Machine, fsType string) ([]mount.Mount, string, func(), error) {
+ mounts := make([]mount.Mount, 0, 1)
+ switch fsType {
+ case BindFS:
+ dir, err := machine.RunCommand("mktemp", "-d")
+ if err != nil {
+ return mounts, "", func() {}, fmt.Errorf("failed to create tempdir: %v", err)
+ }
+ dir = strings.TrimSuffix(dir, "\n")
+
+ out, err := machine.RunCommand("chmod", "777", dir)
+ if err != nil {
+ machine.RunCommand("rm", "-rf", dir)
+ return mounts, "", func() {}, fmt.Errorf("failed modify directory: %v %s", err, out)
+ }
+ target := "/data"
+ mounts = append(mounts, mount.Mount{
+ Target: target,
+ Source: dir,
+ Type: mount.TypeBind,
+ })
+ return mounts, target, func() { machine.RunCommand("rm", "-rf", dir) }, nil
+ case RootFS:
+ return mounts, "/", func() {}, nil
+ case TmpFS:
+ target := "/data"
+ mounts = append(mounts, mount.Mount{
+ Target: target,
+ Type: mount.TypeTmpfs,
+ })
+ return mounts, target, func() {}, nil
+ default:
+ return mounts, "", func() {}, fmt.Errorf("illegal mount type not supported: %v", fsType)
+ }
+}
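
A hedged usage sketch of the new MakeMount helper, assuming the gvisor test tree; the image name and sleep arguments are placeholders:

	package fs

	import (
		"context"
		"fmt"
		"log"

		"gvisor.dev/gvisor/pkg/test/dockerutil"
		"gvisor.dev/gvisor/test/benchmarks/harness"
	)

	// startWithMount shows the intended flow; it is a sketch, not part of the change.
	func startWithMount(ctx context.Context, machine harness.Machine, c *dockerutil.Container) error {
		mounts, dir, cleanup, err := harness.MakeMount(machine, harness.TmpFS)
		if err != nil {
			return fmt.Errorf("MakeMount: %w", err)
		}
		// For BindFS the cleanup removes the mktemp-backed host directory.
		defer cleanup()
		// dir is "/data" for bind and tmpfs mounts, "/" for RootFS.
		log.Printf("working directory inside the container: %s", dir)

		// The returned mounts plug straight into RunOpts.
		return c.Spawn(ctx, dockerutil.RunOpts{
			Image:  "benchmarks/fio",
			Mounts: mounts,
		}, "sleep", "1000")
	}
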
diff --git a/test/benchmarks/tcp/tcp_proxy.go b/test/benchmarks/tcp/tcp_proxy.go
index 780e4f7ae..be74e4d4a 100644
--- a/test/benchmarks/tcp/tcp_proxy.go
+++ b/test/benchmarks/tcp/tcp_proxy.go
@@ -29,7 +29,6 @@ import (
"runtime"
"runtime/pprof"
"strconv"
- "syscall"
"time"
"golang.org/x/sys/unix"
@@ -112,33 +111,33 @@ func setupNetwork(ifaceName string, numChannels int) (fds []int, err error) {
const protocol = 0x0300 // htons(ETH_P_ALL)
fds := make([]int, numChannels)
for i := range fds {
- fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, protocol)
+ fd, err := unix.Socket(unix.AF_PACKET, unix.SOCK_RAW, protocol)
if err != nil {
return nil, fmt.Errorf("unable to create raw socket: %v", err)
}
// Bind to the appropriate device.
- ll := syscall.SockaddrLinklayer{
+ ll := unix.SockaddrLinklayer{
Protocol: protocol,
Ifindex: iface.Index,
- Pkttype: syscall.PACKET_HOST,
+ Pkttype: unix.PACKET_HOST,
}
- if err := syscall.Bind(fd, &ll); err != nil {
+ if err := unix.Bind(fd, &ll); err != nil {
return nil, fmt.Errorf("unable to bind to %q: %v", iface.Name, err)
}
// RAW Sockets by default have a very small SO_RCVBUF of 256KB,
// up it to at least 4MB to reduce packet drops.
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_RCVBUF, bufSize); err != nil {
+ if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, bufSize); err != nil {
return nil, fmt.Errorf("setsockopt(..., SO_RCVBUF, %v,..) = %v", bufSize, err)
}
- if err := syscall.SetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_SNDBUF, bufSize); err != nil {
+ if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, bufSize); err != nil {
return nil, fmt.Errorf("setsockopt(..., SO_SNDBUF, %v,..) = %v", bufSize, err)
}
if !*swgso && *gso != 0 {
- if err := syscall.SetsockoptInt(fd, syscall.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
+ if err := unix.SetsockoptInt(fd, unix.SOL_PACKET, unix.PACKET_VNET_HDR, 1); err != nil {
return nil, fmt.Errorf("unable to enable the PACKET_VNET_HDR option: %v", err)
}
}
@@ -403,7 +402,7 @@ func main() {
log.Printf("client=%v, server=%v, ready.", *client, *server)
sigs := make(chan os.Signal, 1)
- signal.Notify(sigs, syscall.SIGTERM)
+ signal.Notify(sigs, unix.SIGTERM)
go func() {
<-sigs
if *cpuprofile != "" {
diff --git a/test/fuse/linux/mount_test.cc b/test/fuse/linux/mount_test.cc
index 8a5478116..276f842ea 100644
--- a/test/fuse/linux/mount_test.cc
+++ b/test/fuse/linux/mount_test.cc
@@ -15,6 +15,7 @@
#include <errno.h>
#include <fcntl.h>
#include <sys/mount.h>
+#include <unistd.h>
#include "gtest/gtest.h"
#include "test/util/mount_util.h"
@@ -29,7 +30,9 @@ namespace {
TEST(FuseMount, Success) {
const FileDescriptor fd =
ASSERT_NO_ERRNO_AND_VALUE(Open("/dev/fuse", O_WRONLY));
- std::string mopts = absl::StrCat("fd=", std::to_string(fd.get()));
+ std::string mopts =
+ absl::StrFormat("fd=%d,user_id=%d,group_id=%d,rootmode=0777", fd.get(),
+ getuid(), getgid());
const auto dir = ASSERT_NO_ERRNO_AND_VALUE(TempPath::CreateDir());
diff --git a/test/iptables/BUILD b/test/iptables/BUILD
index ae4bba847..94d4ca2d4 100644
--- a/test/iptables/BUILD
+++ b/test/iptables/BUILD
@@ -18,6 +18,7 @@ go_library(
"//pkg/binary",
"//pkg/test/testutil",
"//pkg/usermem",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/test/iptables/iptables_unsafe.go b/test/iptables/iptables_unsafe.go
index bd85a8fea..dd1a1c082 100644
--- a/test/iptables/iptables_unsafe.go
+++ b/test/iptables/iptables_unsafe.go
@@ -16,12 +16,13 @@ package iptables
import (
"fmt"
- "syscall"
"unsafe"
+
+ "golang.org/x/sys/unix"
)
type originalDstError struct {
- errno syscall.Errno
+ errno unix.Errno
}
func (e originalDstError) Error() string {
@@ -32,27 +33,27 @@ func (e originalDstError) Error() string {
// getsockopt.
const SO_ORIGINAL_DST = 80
-func originalDestination4(connfd int) (syscall.RawSockaddrInet4, error) {
- var addr syscall.RawSockaddrInet4
- var addrLen uint32 = syscall.SizeofSockaddrInet4
- if errno := originalDestination(connfd, syscall.SOL_IP, unsafe.Pointer(&addr), &addrLen); errno != 0 {
- return syscall.RawSockaddrInet4{}, originalDstError{errno}
+func originalDestination4(connfd int) (unix.RawSockaddrInet4, error) {
+ var addr unix.RawSockaddrInet4
+ var addrLen uint32 = unix.SizeofSockaddrInet4
+ if errno := originalDestination(connfd, unix.SOL_IP, unsafe.Pointer(&addr), &addrLen); errno != 0 {
+ return unix.RawSockaddrInet4{}, originalDstError{errno}
}
return addr, nil
}
-func originalDestination6(connfd int) (syscall.RawSockaddrInet6, error) {
- var addr syscall.RawSockaddrInet6
- var addrLen uint32 = syscall.SizeofSockaddrInet6
- if errno := originalDestination(connfd, syscall.SOL_IPV6, unsafe.Pointer(&addr), &addrLen); errno != 0 {
- return syscall.RawSockaddrInet6{}, originalDstError{errno}
+func originalDestination6(connfd int) (unix.RawSockaddrInet6, error) {
+ var addr unix.RawSockaddrInet6
+ var addrLen uint32 = unix.SizeofSockaddrInet6
+ if errno := originalDestination(connfd, unix.SOL_IPV6, unsafe.Pointer(&addr), &addrLen); errno != 0 {
+ return unix.RawSockaddrInet6{}, originalDstError{errno}
}
return addr, nil
}
-func originalDestination(connfd int, level uintptr, optval unsafe.Pointer, optlen *uint32) syscall.Errno {
- _, _, errno := syscall.Syscall6(
- syscall.SYS_GETSOCKOPT,
+func originalDestination(connfd int, level uintptr, optval unsafe.Pointer, optlen *uint32) unix.Errno {
+ _, _, errno := unix.Syscall6(
+ unix.SYS_GETSOCKOPT,
uintptr(connfd),
level,
SO_ORIGINAL_DST,
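
For orientation, a hedged sketch of how the rewritten helpers are used from the same iptables package; connFD is assumed to come from unix.Accept on the redirected listener, fmt is assumed to be imported, and the helper name is illustrative:

	// logOriginalDst4 asks the kernel (via SO_ORIGINAL_DST) where the
	// redirected client was originally headed.
	func logOriginalDst4(connFD int) error {
		addr, err := originalDestination4(connFD)
		if err != nil {
			return err
		}
		// RawSockaddrInet4 keeps the port in network byte order; swap it back.
		port := addr.Port>>8 | addr.Port<<8
		fmt.Printf("original destination: %d.%d.%d.%d:%d\n",
			addr.Addr[0], addr.Addr[1], addr.Addr[2], addr.Addr[3], port)
		return nil
	}
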
diff --git a/test/iptables/nat.go b/test/iptables/nat.go
index 7f1d6d7ad..70d8a1832 100644
--- a/test/iptables/nat.go
+++ b/test/iptables/nat.go
@@ -19,8 +19,8 @@ import (
"errors"
"fmt"
"net"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/binary"
"gvisor.dev/gvisor/pkg/usermem"
)
@@ -584,33 +584,33 @@ func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.
// traditional syscalls.
// Create the listening socket, bind, listen, and accept.
- family := syscall.AF_INET
+ family := unix.AF_INET
if ipv6 {
- family = syscall.AF_INET6
+ family = unix.AF_INET6
}
- sockfd, err := syscall.Socket(family, syscall.SOCK_STREAM, 0)
+ sockfd, err := unix.Socket(family, unix.SOCK_STREAM, 0)
if err != nil {
return err
}
- defer syscall.Close(sockfd)
+ defer unix.Close(sockfd)
- var bindAddr syscall.Sockaddr
+ var bindAddr unix.Sockaddr
if ipv6 {
- bindAddr = &syscall.SockaddrInet6{
+ bindAddr = &unix.SockaddrInet6{
Port: acceptPort,
Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, // in6addr_any
}
} else {
- bindAddr = &syscall.SockaddrInet4{
+ bindAddr = &unix.SockaddrInet4{
Port: acceptPort,
Addr: [4]byte{0, 0, 0, 0}, // INADDR_ANY
}
}
- if err := syscall.Bind(sockfd, bindAddr); err != nil {
+ if err := unix.Bind(sockfd, bindAddr); err != nil {
return err
}
- if err := syscall.Listen(sockfd, 1); err != nil {
+ if err := unix.Listen(sockfd, 1); err != nil {
return err
}
@@ -619,8 +619,8 @@ func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.
errCh := make(chan error)
go func() {
for {
- connFD, _, err := syscall.Accept(sockfd)
- if errors.Is(err, syscall.EINTR) {
+ connFD, _, err := unix.Accept(sockfd)
+ if errors.Is(err, unix.EINTR) {
continue
}
if err != nil {
@@ -641,7 +641,7 @@ func listenForRedirectedConn(ctx context.Context, ipv6 bool, originalDsts []net.
return err
case connFD = <-connCh:
}
- defer syscall.Close(connFD)
+ defer unix.Close(connFD)
// Verify that, despite listening on acceptPort, SO_ORIGINAL_DST
// indicates the packet was sent to originalDst:dropPort.
@@ -764,35 +764,35 @@ func recvWithRECVORIGDSTADDR(ctx context.Context, ipv6 bool, expectedDst *net.IP
// Create the listening socket.
var (
- family = syscall.AF_INET
- level = syscall.SOL_IP
- option = syscall.IP_RECVORIGDSTADDR
- bindAddr syscall.Sockaddr = &syscall.SockaddrInet4{
+ family = unix.AF_INET
+ level = unix.SOL_IP
+ option = unix.IP_RECVORIGDSTADDR
+ bindAddr unix.Sockaddr = &unix.SockaddrInet4{
Port: int(port),
Addr: [4]byte{0, 0, 0, 0}, // INADDR_ANY
}
)
if ipv6 {
- family = syscall.AF_INET6
- level = syscall.SOL_IPV6
+ family = unix.AF_INET6
+ level = unix.SOL_IPV6
option = 74 // IPV6_RECVORIGDSTADDR, which is missing from the syscall package.
- bindAddr = &syscall.SockaddrInet6{
+ bindAddr = &unix.SockaddrInet6{
Port: int(port),
Addr: [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, // in6addr_any
}
}
- sockfd, err := syscall.Socket(family, syscall.SOCK_DGRAM, 0)
+ sockfd, err := unix.Socket(family, unix.SOCK_DGRAM, 0)
if err != nil {
- return fmt.Errorf("failed Socket(%d, %d, 0): %w", family, syscall.SOCK_DGRAM, err)
+ return fmt.Errorf("failed Socket(%d, %d, 0): %w", family, unix.SOCK_DGRAM, err)
}
- defer syscall.Close(sockfd)
+ defer unix.Close(sockfd)
- if err := syscall.Bind(sockfd, bindAddr); err != nil {
+ if err := unix.Bind(sockfd, bindAddr); err != nil {
return fmt.Errorf("failed Bind(%d, %+v): %v", sockfd, bindAddr, err)
}
// Enable IP_RECVORIGDSTADDR.
- if err := syscall.SetsockoptInt(sockfd, level, option, 1); err != nil {
+ if err := unix.SetsockoptInt(sockfd, level, option, 1); err != nil {
return fmt.Errorf("failed SetsockoptByte(%d, %d, %d, 1): %v", sockfd, level, option, err)
}
@@ -837,41 +837,41 @@ func recvWithRECVORIGDSTADDR(ctx context.Context, ipv6 bool, expectedDst *net.IP
// Verify that the address has the post-NAT port and address.
if ipv6 {
- return addrMatches6(addr.(syscall.RawSockaddrInet6), localAddrs, redirectPort)
+ return addrMatches6(addr.(unix.RawSockaddrInet6), localAddrs, redirectPort)
}
- return addrMatches4(addr.(syscall.RawSockaddrInet4), localAddrs, redirectPort)
+ return addrMatches4(addr.(unix.RawSockaddrInet4), localAddrs, redirectPort)
}
-func recvOrigDstAddr4(sockfd int) (syscall.RawSockaddrInet4, error) {
- buf, err := recvOrigDstAddr(sockfd, syscall.SOL_IP, syscall.SizeofSockaddrInet4)
+func recvOrigDstAddr4(sockfd int) (unix.RawSockaddrInet4, error) {
+ buf, err := recvOrigDstAddr(sockfd, unix.SOL_IP, unix.SizeofSockaddrInet4)
if err != nil {
- return syscall.RawSockaddrInet4{}, err
+ return unix.RawSockaddrInet4{}, err
}
- var addr syscall.RawSockaddrInet4
+ var addr unix.RawSockaddrInet4
binary.Unmarshal(buf, usermem.ByteOrder, &addr)
return addr, nil
}
-func recvOrigDstAddr6(sockfd int) (syscall.RawSockaddrInet6, error) {
- buf, err := recvOrigDstAddr(sockfd, syscall.SOL_IP, syscall.SizeofSockaddrInet6)
+func recvOrigDstAddr6(sockfd int) (unix.RawSockaddrInet6, error) {
+ buf, err := recvOrigDstAddr(sockfd, unix.SOL_IP, unix.SizeofSockaddrInet6)
if err != nil {
- return syscall.RawSockaddrInet6{}, err
+ return unix.RawSockaddrInet6{}, err
}
- var addr syscall.RawSockaddrInet6
+ var addr unix.RawSockaddrInet6
binary.Unmarshal(buf, usermem.ByteOrder, &addr)
return addr, nil
}
func recvOrigDstAddr(sockfd int, level uintptr, addrSize int) ([]byte, error) {
buf := make([]byte, 64)
- oob := make([]byte, syscall.CmsgSpace(addrSize))
+ oob := make([]byte, unix.CmsgSpace(addrSize))
for {
- _, oobn, _, _, err := syscall.Recvmsg(
+ _, oobn, _, _, err := unix.Recvmsg(
sockfd,
buf, // Message buffer.
oob, // Out-of-band buffer.
0) // Flags.
- if errors.Is(err, syscall.EINTR) {
+ if errors.Is(err, unix.EINTR) {
continue
}
if err != nil {
@@ -880,7 +880,7 @@ func recvOrigDstAddr(sockfd int, level uintptr, addrSize int) ([]byte, error) {
oob = oob[:oobn]
// Parse out the control message.
- msgs, err := syscall.ParseSocketControlMessage(oob)
+ msgs, err := unix.ParseSocketControlMessage(oob)
if err != nil {
return nil, fmt.Errorf("failed to parse control message: %w", err)
}
@@ -888,10 +888,10 @@ func recvOrigDstAddr(sockfd int, level uintptr, addrSize int) ([]byte, error) {
}
}
-func addrMatches4(got syscall.RawSockaddrInet4, wantAddrs []net.IP, port uint16) error {
+func addrMatches4(got unix.RawSockaddrInet4, wantAddrs []net.IP, port uint16) error {
for _, wantAddr := range wantAddrs {
- want := syscall.RawSockaddrInet4{
- Family: syscall.AF_INET,
+ want := unix.RawSockaddrInet4{
+ Family: unix.AF_INET,
Port: htons(port),
}
copy(want.Addr[:], wantAddr.To4())
@@ -902,10 +902,10 @@ func addrMatches4(got syscall.RawSockaddrInet4, wantAddrs []net.IP, port uint16)
return fmt.Errorf("got %+v, but wanted one of %+v (note: port numbers are in network byte order)", got, wantAddrs)
}
-func addrMatches6(got syscall.RawSockaddrInet6, wantAddrs []net.IP, port uint16) error {
+func addrMatches6(got unix.RawSockaddrInet6, wantAddrs []net.IP, port uint16) error {
for _, wantAddr := range wantAddrs {
- want := syscall.RawSockaddrInet6{
- Family: syscall.AF_INET6,
+ want := unix.RawSockaddrInet6{
+ Family: unix.AF_INET6,
Port: htons(port),
}
copy(want.Addr[:], wantAddr.To16())
diff --git a/test/packetimpact/runner/dut.go b/test/packetimpact/runner/dut.go
index 3da265b78..1064ca976 100644
--- a/test/packetimpact/runner/dut.go
+++ b/test/packetimpact/runner/dut.go
@@ -109,6 +109,7 @@ type dutInfo struct {
dut DUT
ctrlNet, testNet *dockerutil.Network
netInfo *testbench.DUTTestNet
+ uname *testbench.DUTUname
}
// setUpDUT will set up one DUT and return information for setting up the
@@ -182,6 +183,10 @@ func setUpDUT(ctx context.Context, t *testing.T, id int, mkDevice func(*dockerut
POSIXServerIP: AddressInSubnet(DUTAddr, *ctrlNet.Subnet),
POSIXServerPort: CtrlPort,
}
+ info.uname, err = dut.Uname(ctx)
+ if err != nil {
+ return dutInfo{}, fmt.Errorf("failed to get uname information on DUT: %w", err)
+ }
return info, nil
}
@@ -195,7 +200,7 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
dutInfoChan := make(chan dutInfo, numDUTs)
errChan := make(chan error, numDUTs)
var dockerNetworks []*dockerutil.Network
- var dutTestNets []*testbench.DUTTestNet
+ var dutInfos []*testbench.DUTInfo
var duts []DUT
setUpCtx, cancelSetup := context.WithCancel(ctx)
@@ -214,7 +219,10 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
select {
case info := <-dutInfoChan:
dockerNetworks = append(dockerNetworks, info.ctrlNet, info.testNet)
- dutTestNets = append(dutTestNets, info.netInfo)
+ dutInfos = append(dutInfos, &testbench.DUTInfo{
+ Net: info.netInfo,
+ Uname: info.uname,
+ })
duts = append(duts, info.dut)
case err := <-errChan:
t.Fatal(err)
@@ -241,28 +249,29 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
testbenchContainer,
testbenchAddr,
dockerNetworks,
+ nil, /* sysctls */
"tail", "-f", "/dev/null",
); err != nil {
t.Fatalf("cannot start testbench container: %s", err)
}
- for i := range dutTestNets {
- name, info, err := deviceByIP(ctx, testbenchContainer, dutTestNets[i].LocalIPv4)
+ for i := range dutInfos {
+ name, info, err := deviceByIP(ctx, testbenchContainer, dutInfos[i].Net.LocalIPv4)
if err != nil {
- t.Fatalf("failed to get the device name associated with %s: %s", dutTestNets[i].LocalIPv4, err)
+ t.Fatalf("failed to get the device name associated with %s: %s", dutInfos[i].Net.LocalIPv4, err)
}
- dutTestNets[i].LocalDevName = name
- dutTestNets[i].LocalDevID = info.ID
- dutTestNets[i].LocalMAC = info.MAC
+ dutInfos[i].Net.LocalDevName = name
+ dutInfos[i].Net.LocalDevID = info.ID
+ dutInfos[i].Net.LocalMAC = info.MAC
localIPv6, err := getOrAssignIPv6Addr(ctx, testbenchContainer, name)
if err != nil {
t.Fatalf("failed to get IPV6 address on %s: %s", testbenchContainer.Name, err)
}
- dutTestNets[i].LocalIPv6 = localIPv6
+ dutInfos[i].Net.LocalIPv6 = localIPv6
}
- dutTestNetsBytes, err := json.Marshal(dutTestNets)
+ dutInfosBytes, err := json.Marshal(dutInfos)
if err != nil {
- t.Fatalf("failed to marshal %v into json: %s", dutTestNets, err)
+ t.Fatalf("failed to marshal %v into json: %s", dutInfos, err)
}
baseSnifferArgs := []string{
@@ -296,7 +305,8 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
"-n",
}
}
- for _, n := range dutTestNets {
+ for _, info := range dutInfos {
+ n := info.Net
snifferArgs := append(baseSnifferArgs, "-i", n.LocalDevName)
if !tshark {
snifferArgs = append(
@@ -351,7 +361,7 @@ func TestWithDUT(ctx context.Context, t *testing.T, mkDevice func(*dockerutil.Co
testArgs = append(testArgs, extraTestArgs...)
testArgs = append(testArgs,
fmt.Sprintf("--native=%t", native),
- "--dut_test_nets_json", string(dutTestNetsBytes),
+ "--dut_infos_json", string(dutInfosBytes),
)
testbenchLogs, err := testbenchContainer.Exec(ctx, dockerutil.ExecOpts{}, testArgs...)
if (err != nil) != expectFailure {
@@ -388,6 +398,10 @@ type DUT interface {
// The t parameter is supposed to be used for t.Cleanup. Don't use it for
// t.Fatal/FailNow functions.
Prepare(ctx context.Context, t *testing.T, runOpts dockerutil.RunOpts, ctrlNet, testNet *dockerutil.Network) (net.IP, net.HardwareAddr, uint32, string, error)
+
+	// Uname gathers information about the DUT using the uname command.
+ Uname(ctx context.Context) (*testbench.DUTUname, error)
+
// Logs retrieves the logs from the dut.
Logs(ctx context.Context) (string, error)
}
@@ -415,6 +429,10 @@ func (dut *DockerDUT) Prepare(ctx context.Context, _ *testing.T, runOpts dockeru
dut.c,
DUTAddr,
[]*dockerutil.Network{ctrlNet, testNet},
+ map[string]string{
+ // This enables creating ICMP sockets on Linux.
+ "net.ipv4.ping_group_range": "0 0",
+ },
containerPosixServerBinary,
"--ip=0.0.0.0",
fmt.Sprintf("--port=%d", CtrlPort),
@@ -440,6 +458,38 @@ func (dut *DockerDUT) Prepare(ctx context.Context, _ *testing.T, runOpts dockeru
return remoteIPv6, dutDeviceInfo.MAC, dutDeviceInfo.ID, testNetDev, nil
}
+// Uname implements DUT.Uname.
+func (dut *DockerDUT) Uname(ctx context.Context) (*testbench.DUTUname, error) {
+ machine, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-m")
+ if err != nil {
+ return nil, err
+ }
+ kernelRelease, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-r")
+ if err != nil {
+ return nil, err
+ }
+ kernelVersion, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-v")
+ if err != nil {
+ return nil, err
+ }
+ kernelName, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-s")
+ if err != nil {
+ return nil, err
+ }
+ // TODO(gvisor.dev/issues/5586): -o is not supported on macOS.
+ operatingSystem, err := dut.c.Exec(ctx, dockerutil.ExecOpts{}, "uname", "-o")
+ if err != nil {
+ return nil, err
+ }
+ return &testbench.DUTUname{
+ Machine: strings.TrimRight(machine, "\n"),
+ KernelName: strings.TrimRight(kernelName, "\n"),
+ KernelRelease: strings.TrimRight(kernelRelease, "\n"),
+ KernelVersion: strings.TrimRight(kernelVersion, "\n"),
+ OperatingSystem: strings.TrimRight(operatingSystem, "\n"),
+ }, nil
+}
+
// Logs implements DUT.Logs.
func (dut *DockerDUT) Logs(ctx context.Context) (string, error) {
logs, err := dut.c.Logs(ctx)
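
With the DUT's uname now exposed on the testbench side, tests can branch on what the DUT runs. A hedged sketch; the "GNU/Linux" value is what `uname -o` prints on Linux and the helper name is illustrative, not part of the change:

	package tests

	import (
		"testing"

		"gvisor.dev/gvisor/test/packetimpact/testbench"
	)

	// skipUnlessLinuxDUT skips a test when the DUT is not running Linux.
	func skipUnlessLinuxDUT(t *testing.T, dut testbench.DUT) {
		t.Helper()
		if dut.Uname.OperatingSystem != "GNU/Linux" {
			t.Skipf("test needs a Linux DUT, got %q (kernel %s %s)",
				dut.Uname.OperatingSystem, dut.Uname.KernelName, dut.Uname.KernelRelease)
		}
	}
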
@@ -545,11 +595,14 @@ func createDockerNetwork(ctx context.Context, n *dockerutil.Network) error {
// StartContainer will create a container instance from runOpts, connect it
// with the specified docker networks and start executing the specified cmd.
-func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, cmd ...string) error {
+func StartContainer(ctx context.Context, runOpts dockerutil.RunOpts, c *dockerutil.Container, containerAddr net.IP, ns []*dockerutil.Network, sysctls map[string]string, cmd ...string) error {
conf, hostconf, netconf := c.ConfigsFrom(runOpts, cmd...)
_ = netconf
hostconf.AutoRemove = true
hostconf.Sysctls = map[string]string{"net.ipv6.conf.all.disable_ipv6": "0"}
+ for k, v := range sysctls {
+ hostconf.Sysctls[k] = v
+ }
if err := c.CreateFrom(ctx, runOpts.Image, conf, hostconf, nil); err != nil {
return fmt.Errorf("unable to create container %s: %w", c.Name, err)
diff --git a/test/packetimpact/testbench/BUILD b/test/packetimpact/testbench/BUILD
index 983c2c030..43b4c7ca1 100644
--- a/test/packetimpact/testbench/BUILD
+++ b/test/packetimpact/testbench/BUILD
@@ -1,7 +1,6 @@
load("//tools:defs.bzl", "go_library", "go_test")
package(
- default_visibility = ["//test/packetimpact:__subpackages__"],
licenses = ["notice"],
)
@@ -15,6 +14,7 @@ go_library(
"rawsockets.go",
"testbench.go",
],
+ visibility = ["//test/packetimpact:__subpackages__"],
deps = [
"//pkg/tcpip",
"//pkg/tcpip/buffer",
diff --git a/test/packetimpact/testbench/connections.go b/test/packetimpact/testbench/connections.go
index 15e1a51de..bafc45d3b 100644
--- a/test/packetimpact/testbench/connections.go
+++ b/test/packetimpact/testbench/connections.go
@@ -677,17 +677,17 @@ func (conn *TCPIPv4) Connect(t *testing.T) {
t.Helper()
// Send the SYN.
- conn.Send(t, TCP{Flags: Uint8(header.TCPFlagSyn)})
+ conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagSyn)})
// Wait for the SYN-ACK.
- synAck, err := conn.Expect(t, TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ synAck, err := conn.Expect(t, TCP{Flags: TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("didn't get synack during handshake: %s", err)
}
conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
// Send an ACK.
- conn.Send(t, TCP{Flags: Uint8(header.TCPFlagAck)})
+ conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagAck)})
}
// ConnectWithOptions performs a TCP 3-way handshake with given TCP options.
@@ -696,17 +696,17 @@ func (conn *TCPIPv4) ConnectWithOptions(t *testing.T, options []byte) {
t.Helper()
// Send the SYN.
- conn.Send(t, TCP{Flags: Uint8(header.TCPFlagSyn), Options: options})
+ conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagSyn), Options: options})
// Wait for the SYN-ACK.
- synAck, err := conn.Expect(t, TCP{Flags: Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ synAck, err := conn.Expect(t, TCP{Flags: TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("didn't get synack during handshake: %s", err)
}
conn.layerStates[len(conn.layerStates)-1].(*tcpState).synAck = synAck
// Send an ACK.
- conn.Send(t, TCP{Flags: Uint8(header.TCPFlagAck)})
+ conn.Send(t, TCP{Flags: TCPFlags(header.TCPFlagAck)})
}
// ExpectData is a convenient method that expects a Layer and the Layer after
diff --git a/test/packetimpact/testbench/dut.go b/test/packetimpact/testbench/dut.go
index be5121d98..f4c83ab96 100644
--- a/test/packetimpact/testbench/dut.go
+++ b/test/packetimpact/testbench/dut.go
@@ -19,7 +19,6 @@ import (
"encoding/binary"
"fmt"
"net"
- "syscall"
"testing"
"time"
@@ -35,24 +34,26 @@ type DUT struct {
conn *grpc.ClientConn
posixServer POSIXClient
Net *DUTTestNet
+ Uname *DUTUname
}
// NewDUT creates a new connection with the DUT over gRPC.
func NewDUT(t *testing.T) DUT {
t.Helper()
- n := GetDUTTestNet()
- dut := n.ConnectToDUT(t)
+ info := getDUTInfo()
+ dut := info.ConnectToDUT(t)
t.Cleanup(func() {
dut.TearDownConnection()
- dut.Net.Release()
+ info.release()
})
return dut
}
// ConnectToDUT connects to DUT through gRPC.
-func (n *DUTTestNet) ConnectToDUT(t *testing.T) DUT {
+func (info *DUTInfo) ConnectToDUT(t *testing.T) DUT {
t.Helper()
+ n := info.Net
posixServerAddress := net.JoinHostPort(n.POSIXServerIP.String(), fmt.Sprintf("%d", n.POSIXServerPort))
conn, err := grpc.Dial(posixServerAddress, grpc.WithInsecure(), grpc.WithKeepaliveParams(keepalive.ClientParameters{Timeout: RPCKeepalive}))
if err != nil {
@@ -63,6 +64,7 @@ func (n *DUTTestNet) ConnectToDUT(t *testing.T) DUT {
conn: conn,
posixServer: posixServer,
Net: n,
+ Uname: info.Uname,
}
}
@@ -196,7 +198,7 @@ func (dut *DUT) AcceptWithErrno(ctx context.Context, t *testing.T, sockfd int32)
if err != nil {
t.Fatalf("failed to call Accept: %s", err)
}
- return resp.GetFd(), dut.protoToSockaddr(t, resp.GetAddr()), syscall.Errno(resp.GetErrno_())
+ return resp.GetFd(), dut.protoToSockaddr(t, resp.GetAddr()), unix.Errno(resp.GetErrno_())
}
// Bind calls bind on the DUT and causes a fatal test failure if it doesn't
@@ -225,7 +227,7 @@ func (dut *DUT) BindWithErrno(ctx context.Context, t *testing.T, fd int32, sa un
if err != nil {
t.Fatalf("failed to call Bind: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// Close calls close on the DUT and causes a fatal test failure if it doesn't
@@ -253,7 +255,7 @@ func (dut *DUT) CloseWithErrno(ctx context.Context, t *testing.T, fd int32) (int
if err != nil {
t.Fatalf("failed to call Close: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// Connect calls connect on the DUT and causes a fatal test failure if it
@@ -267,7 +269,7 @@ func (dut *DUT) Connect(t *testing.T, fd int32, sa unix.Sockaddr) {
ret, err := dut.ConnectWithErrno(ctx, t, fd, sa)
// Ignore 'operation in progress' error that can be returned when the socket
// is non-blocking.
- if err != syscall.Errno(unix.EINPROGRESS) && ret != 0 {
+ if err != unix.EINPROGRESS && ret != 0 {
t.Fatalf("failed to connect socket: %s", err)
}
}
@@ -284,7 +286,7 @@ func (dut *DUT) ConnectWithErrno(ctx context.Context, t *testing.T, fd int32, sa
if err != nil {
t.Fatalf("failed to call Connect: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// GetSockName calls getsockname on the DUT and causes a fatal test failure if
@@ -313,7 +315,7 @@ func (dut *DUT) GetSockNameWithErrno(ctx context.Context, t *testing.T, sockfd i
if err != nil {
t.Fatalf("failed to call Bind: %s", err)
}
- return resp.GetRet(), dut.protoToSockaddr(t, resp.GetAddr()), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), dut.protoToSockaddr(t, resp.GetAddr()), unix.Errno(resp.GetErrno_())
}
func (dut *DUT) getSockOpt(ctx context.Context, t *testing.T, sockfd, level, optname, optlen int32, typ pb.GetSockOptRequest_SockOptType) (int32, *pb.SockOptVal, error) {
@@ -334,7 +336,7 @@ func (dut *DUT) getSockOpt(ctx context.Context, t *testing.T, sockfd, level, opt
if optval == nil {
t.Fatalf("GetSockOpt response does not contain a value")
}
- return resp.GetRet(), optval, syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), optval, unix.Errno(resp.GetErrno_())
}
// GetSockOpt calls getsockopt on the DUT and causes a fatal test failure if it
@@ -452,7 +454,7 @@ func (dut *DUT) ListenWithErrno(ctx context.Context, t *testing.T, sockfd, backl
if err != nil {
t.Fatalf("failed to call Listen: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// PollOne calls poll on the DUT and asserts that the expected event must be
@@ -519,7 +521,7 @@ func (dut *DUT) PollWithErrno(ctx context.Context, t *testing.T, pfds []unix.Pol
Revents: int16(protoPfd.GetEvents()),
})
}
- return resp.GetRet(), result, syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), result, unix.Errno(resp.GetErrno_())
}
// Send calls send on the DUT and causes a fatal test failure if it doesn't
@@ -550,7 +552,7 @@ func (dut *DUT) SendWithErrno(ctx context.Context, t *testing.T, sockfd int32, b
if err != nil {
t.Fatalf("failed to call Send: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// SendTo calls sendto on the DUT and causes a fatal test failure if it doesn't
@@ -582,7 +584,7 @@ func (dut *DUT) SendToWithErrno(ctx context.Context, t *testing.T, sockfd int32,
if err != nil {
t.Fatalf("failed to call SendTo: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// SetNonBlocking will set O_NONBLOCK flag for fd if nonblocking
@@ -602,7 +604,7 @@ func (dut *DUT) SetNonBlocking(t *testing.T, fd int32, nonblocking bool) {
t.Fatalf("failed to call SetNonblocking: %s", err)
}
if resp.GetRet() == -1 {
- t.Fatalf("fcntl(%d, %s) failed: %s", fd, resp.GetCmd(), syscall.Errno(resp.GetErrno_()))
+ t.Fatalf("fcntl(%d, %s) failed: %s", fd, resp.GetCmd(), unix.Errno(resp.GetErrno_()))
}
}
@@ -619,7 +621,7 @@ func (dut *DUT) setSockOpt(ctx context.Context, t *testing.T, sockfd, level, opt
if err != nil {
t.Fatalf("failed to call SetSockOpt: %s", err)
}
- return resp.GetRet(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), unix.Errno(resp.GetErrno_())
}
// SetSockOpt calls setsockopt on the DUT and causes a fatal test failure if it
@@ -720,7 +722,7 @@ func (dut *DUT) SocketWithErrno(t *testing.T, domain, typ, proto int32) (int32,
if err != nil {
t.Fatalf("failed to call Socket: %s", err)
}
- return resp.GetFd(), syscall.Errno(resp.GetErrno_())
+ return resp.GetFd(), unix.Errno(resp.GetErrno_())
}
// Recv calls recv on the DUT and causes a fatal test failure if it doesn't
@@ -751,7 +753,7 @@ func (dut *DUT) RecvWithErrno(ctx context.Context, t *testing.T, sockfd, len, fl
if err != nil {
t.Fatalf("failed to call Recv: %s", err)
}
- return resp.GetRet(), resp.GetBuf(), syscall.Errno(resp.GetErrno_())
+ return resp.GetRet(), resp.GetBuf(), unix.Errno(resp.GetErrno_())
}
// SetSockLingerOption sets SO_LINGER socket option on the DUT.
@@ -791,5 +793,5 @@ func (dut *DUT) ShutdownWithErrno(ctx context.Context, t *testing.T, fd, how int
if err != nil {
t.Fatalf("failed to call Shutdown: %s", err)
}
- return syscall.Errno(resp.GetErrno_())
+ return unix.Errno(resp.GetErrno_())
}
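
The syscall→unix errno swap above works because unix.Errno in golang.org/x/sys/unix is an alias for syscall.Errno, so the values converted from the gRPC responses still compare directly against the unix.E* constants while the syscall import can be dropped. A minimal sketch of that pattern; expectEINPROGRESS and rawErrno are illustrative names, not part of the testbench API:

    package example_test

    import (
        "testing"

        "golang.org/x/sys/unix"
    )

    // expectEINPROGRESS converts a raw errno carried in a POSIX-server response
    // into a unix.Errno and compares it against the unix constant directly.
    func expectEINPROGRESS(t *testing.T, rawErrno uint32) {
        t.Helper()
        if err := unix.Errno(rawErrno); err != unix.EINPROGRESS {
            t.Fatalf("got errno %d (%v), want EINPROGRESS", rawErrno, err)
        }
    }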
diff --git a/test/packetimpact/testbench/layers.go b/test/packetimpact/testbench/layers.go
index 19e6b8d7d..2311f7686 100644
--- a/test/packetimpact/testbench/layers.go
+++ b/test/packetimpact/testbench/layers.go
@@ -407,6 +407,12 @@ func Uint8(v uint8) *uint8 {
return &v
}
+// TCPFlags is a helper routine that allocates a new
+// header.TCPFlags value to store v and returns a pointer to it.
+func TCPFlags(v header.TCPFlags) *header.TCPFlags {
+ return &v
+}
+
// Address is a helper routine that allocates a new tcpip.Address value to
// store v and returns a pointer to it.
func Address(v tcpip.Address) *tcpip.Address {
@@ -852,7 +858,13 @@ func (l *ICMPv6) ToBytes() ([]byte, error) {
if err != nil {
return nil, err
}
- h.SetChecksum(header.ICMPv6Checksum(h, *ipv6.SrcAddr, *ipv6.DstAddr, payload))
+ h.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: h,
+ Src: *ipv6.SrcAddr,
+ Dst: *ipv6.DstAddr,
+ PayloadCsum: header.ChecksumVV(payload, 0 /* initial */),
+ PayloadLen: payload.Size(),
+ }))
break
}
}
@@ -974,7 +986,7 @@ func (l *ICMPv4) ToBytes() ([]byte, error) {
var vv buffer.VectorisedView
vv.AppendView(buffer.View(l.Payload))
vv.Append(payload)
- h.SetChecksum(header.ICMPv4Checksum(h, vv))
+ h.SetChecksum(header.ICMPv4Checksum(h, header.ChecksumVV(vv, 0 /* initial */)))
}
return h, nil
@@ -1024,7 +1036,7 @@ type TCP struct {
SeqNum *uint32
AckNum *uint32
DataOffset *uint8
- Flags *uint8
+ Flags *header.TCPFlags
WindowSize *uint16
Checksum *uint16
UrgentPointer *uint16
@@ -1057,7 +1069,7 @@ func (l *TCP) ToBytes() ([]byte, error) {
h.SetDataOffset(uint8(l.length()))
}
if l.Flags != nil {
- h.SetFlags(*l.Flags)
+ h.SetFlags(uint8(*l.Flags))
}
if l.WindowSize != nil {
h.SetWindowSize(*l.WindowSize)
@@ -1151,7 +1163,7 @@ func parseTCP(b []byte) (Layer, layerParser) {
SeqNum: Uint32(h.SequenceNumber()),
AckNum: Uint32(h.AckNumber()),
DataOffset: Uint8(h.DataOffset()),
- Flags: Uint8(h.Flags()),
+ Flags: TCPFlags(h.Flags()),
WindowSize: Uint16(h.WindowSize()),
Checksum: Uint16(h.Checksum()),
UrgentPointer: Uint16(h.UrgentPointer()),
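
With Flags now a *header.TCPFlags, tests build flag sets from the header constants and wrap them with the new TCPFlags helper. A minimal sketch of composing such a layer; synAckLayer and the window size are illustrative, not taken from the tests below:

    package example_test

    import (
        "gvisor.dev/gvisor/pkg/tcpip/header"
        "gvisor.dev/gvisor/test/packetimpact/testbench"
    )

    // synAckLayer builds a testbench TCP layer whose typed flag set is the
    // bitwise OR of the SYN and ACK flag constants.
    func synAckLayer() *testbench.TCP {
        return &testbench.TCP{
            Flags:      testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck),
            WindowSize: testbench.Uint16(8192),
        }
    }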
diff --git a/test/packetimpact/testbench/layers_test.go b/test/packetimpact/testbench/layers_test.go
index eca0780b5..614a5de1e 100644
--- a/test/packetimpact/testbench/layers_test.go
+++ b/test/packetimpact/testbench/layers_test.go
@@ -178,7 +178,7 @@ func TestLayerStringFormat(t *testing.T) {
SeqNum: Uint32(3452155723),
AckNum: Uint32(2596996163),
DataOffset: Uint8(5),
- Flags: Uint8(20),
+ Flags: TCPFlags(header.TCPFlagRst | header.TCPFlagAck),
WindowSize: Uint16(64240),
Checksum: Uint16(0x2e2b),
},
@@ -188,7 +188,7 @@ func TestLayerStringFormat(t *testing.T) {
"SeqNum:3452155723 " +
"AckNum:2596996163 " +
"DataOffset:5 " +
- "Flags:20 " +
+ "Flags: R A " +
"WindowSize:64240 " +
"Checksum:11819" +
"}",
@@ -436,7 +436,7 @@ func TestTCPOptions(t *testing.T) {
DstPort: Uint16(54321),
SeqNum: Uint32(0),
AckNum: Uint32(0),
- Flags: Uint8(header.TCPFlagSyn),
+ Flags: TCPFlags(header.TCPFlagSyn),
WindowSize: Uint16(8192),
Checksum: Uint16(0xf51c),
UrgentPointer: Uint16(0),
@@ -480,7 +480,7 @@ func TestTCPOptions(t *testing.T) {
DstPort: Uint16(54321),
SeqNum: Uint32(0),
AckNum: Uint32(0),
- Flags: Uint8(header.TCPFlagSyn),
+ Flags: TCPFlags(header.TCPFlagSyn),
WindowSize: Uint16(8192),
Checksum: Uint16(0xe521),
UrgentPointer: Uint16(0),
diff --git a/test/packetimpact/testbench/testbench.go b/test/packetimpact/testbench/testbench.go
index 891897d55..a73c07e64 100644
--- a/test/packetimpact/testbench/testbench.go
+++ b/test/packetimpact/testbench/testbench.go
@@ -34,14 +34,29 @@ var (
// RPCTimeout is the gRPC timeout.
RPCTimeout = 100 * time.Millisecond
- // dutTestNetsJSON is the json string that describes all the test networks to
+ // dutInfosJSON is the json string that describes information about all the
// duts available to use.
- dutTestNetsJSON string
- // dutTestNets is the pool among which the testbench can choose a DUT to work
+ dutInfosJSON string
+ // dutInfo is the pool among which the testbench can choose a DUT to work
// with.
- dutTestNets chan *DUTTestNet
+ dutInfo chan *DUTInfo
)
+// DUTInfo has both network and uname information about the DUT.
+type DUTInfo struct {
+ Uname *DUTUname
+ Net *DUTTestNet
+}
+
+// DUTUname contains information about the DUT from uname.
+type DUTUname struct {
+ Machine string
+ KernelName string
+ KernelRelease string
+ KernelVersion string
+ OperatingSystem string
+}
+
// DUTTestNet describes the test network setup on dut and how the testbench
// should connect with an existing DUT.
type DUTTestNet struct {
@@ -86,7 +101,7 @@ func registerFlags(fs *flag.FlagSet) {
fs.BoolVar(&Native, "native", Native, "whether the test is running natively")
fs.DurationVar(&RPCTimeout, "rpc_timeout", RPCTimeout, "gRPC timeout")
fs.DurationVar(&RPCKeepalive, "rpc_keepalive", RPCKeepalive, "gRPC keepalive")
- fs.StringVar(&dutTestNetsJSON, "dut_test_nets_json", dutTestNetsJSON, "path to the dut test nets json file")
+ fs.StringVar(&dutInfosJSON, "dut_infos_json", dutInfosJSON, "json that describes the DUTs")
}
// Initialize initializes the testbench, it parse the flags and sets up the
@@ -94,27 +109,27 @@ func registerFlags(fs *flag.FlagSet) {
func Initialize(fs *flag.FlagSet) {
registerFlags(fs)
flag.Parse()
- if err := loadDUTTestNets(); err != nil {
+ if err := loadDUTInfos(); err != nil {
panic(err)
}
}
-// loadDUTTestNets loads available DUT test networks from the json file, it
+// loadDUTInfos loads the available DUT infos from the json string; it
// must be called after flag.Parse().
-func loadDUTTestNets() error {
- var parsedTestNets []DUTTestNet
- if err := json.Unmarshal([]byte(dutTestNetsJSON), &parsedTestNets); err != nil {
+func loadDUTInfos() error {
+ var dutInfos []DUTInfo
+ if err := json.Unmarshal([]byte(dutInfosJSON), &dutInfos); err != nil {
return fmt.Errorf("failed to unmarshal JSON: %w", err)
}
- if got, want := len(parsedTestNets), 1; got < want {
+ if got, want := len(dutInfos), 1; got < want {
return fmt.Errorf("got %d DUTs, the test requires at least %d DUTs", got, want)
}
// Using a buffered channel as semaphore
- dutTestNets = make(chan *DUTTestNet, len(parsedTestNets))
- for i := range parsedTestNets {
- parsedTestNets[i].LocalIPv4 = parsedTestNets[i].LocalIPv4.To4()
- parsedTestNets[i].RemoteIPv4 = parsedTestNets[i].RemoteIPv4.To4()
- dutTestNets <- &parsedTestNets[i]
+ dutInfo = make(chan *DUTInfo, len(dutInfos))
+ for i := range dutInfos {
+ dutInfos[i].Net.LocalIPv4 = dutInfos[i].Net.LocalIPv4.To4()
+ dutInfos[i].Net.RemoteIPv4 = dutInfos[i].Net.RemoteIPv4.To4()
+ dutInfo <- &dutInfos[i]
}
return nil
}
@@ -130,14 +145,13 @@ func GenerateRandomPayload(t *testing.T, n int) []byte {
return buf
}
-// GetDUTTestNet gets a usable DUTTestNet, the function will block until any
-// becomes available.
-func GetDUTTestNet() *DUTTestNet {
- return <-dutTestNets
+// getDUTInfo returns information about an available DUT from the pool. If no
+// DUT is readily available, getDUTInfo blocks until one becomes available.
+func getDUTInfo() *DUTInfo {
+ return <-dutInfo
}
-// Release releases the DUTTestNet back to the pool so that some other test
-// can use.
-func (n *DUTTestNet) Release() {
- dutTestNets <- n
+// release returns the DUTInfo back to the pool.
+func (info *DUTInfo) release() {
+ dutInfo <- info
}
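
The DUT pool above is a buffered channel used as a counting semaphore: loadDUTInfos fills it once, getDUTInfo receives from it (blocking while every DUT is checked out), and release sends the entry back. A self-contained sketch of the same pattern with illustrative names; resource, load, acquire and release are not the testbench API:

    package main

    import "fmt"

    type resource struct{ id int }

    // pool is a buffered channel acting as a counting semaphore: its capacity
    // equals the number of resources, and a receive blocks once all of them
    // have been handed out.
    var pool chan *resource

    func load(n int) {
        pool = make(chan *resource, n)
        for i := 0; i < n; i++ {
            pool <- &resource{id: i}
        }
    }

    // acquire blocks until a resource becomes available.
    func acquire() *resource { return <-pool }

    // release returns the resource to the pool for the next caller.
    func release(r *resource) { pool <- r }

    func main() {
        load(2)
        r := acquire()
        fmt.Printf("using resource %d\n", r.id)
        release(r)
    }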
diff --git a/test/packetimpact/tests/fin_wait2_timeout_test.go b/test/packetimpact/tests/fin_wait2_timeout_test.go
index 11f0fcd1e..cff8ca51d 100644
--- a/test/packetimpact/tests/fin_wait2_timeout_test.go
+++ b/test/packetimpact/tests/fin_wait2_timeout_test.go
@@ -51,21 +51,21 @@ func TestFinWait2Timeout(t *testing.T) {
}
dut.Close(t, acceptFd)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected a FIN-ACK within 1 second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
time.Sleep(5 * time.Second)
conn.Drain(t)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
if tt.linger2 {
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
t.Fatalf("expected a RST packet within a second but got none: %s", err)
}
} else {
- if got, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, 10*time.Second); got != nil || err == nil {
+ if got, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, 10*time.Second); got != nil || err == nil {
t.Fatalf("expected no RST packets within ten seconds but got one: %s", got)
}
}
diff --git a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go b/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
index ee050e2c6..707f0f1f5 100644
--- a/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
+++ b/test/packetimpact/tests/ipv4_fragment_reassembly_test.go
@@ -21,7 +21,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/test/packetimpact/testbench"
)
@@ -118,10 +117,7 @@ func TestIPv4FragmentReassembly(t *testing.T) {
if _, err := rand.Read(originalPayload); err != nil {
t.Fatalf("rand.Read: %s", err)
}
- cksum := header.ICMPv4Checksum(
- icmp,
- buffer.NewVectorisedView(len(originalPayload), []buffer.View{originalPayload}),
- )
+ cksum := header.ICMPv4Checksum(icmp, header.Checksum(originalPayload, 0 /* initial */))
icmp.SetChecksum(cksum)
for _, fragment := range test.fragments {
diff --git a/test/packetimpact/tests/ipv4_id_uniqueness_test.go b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
index a63b41366..2b69ceecb 100644
--- a/test/packetimpact/tests/ipv4_id_uniqueness_test.go
+++ b/test/packetimpact/tests/ipv4_id_uniqueness_test.go
@@ -100,7 +100,7 @@ func TestIPv4RetransmitIdentificationUniqueness(t *testing.T) {
// Let the DUT estimate RTO with RTT from the DATA-ACK.
// TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
// we can skip sending this ACK.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
dut.Send(t, remoteFD, tc.payload, 0)
expectTCP := &testbench.TCP{SeqNum: testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))}
diff --git a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go b/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
index e0b2a2178..4034a128e 100644
--- a/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
+++ b/test/packetimpact/tests/ipv6_fragment_icmp_error_test.go
@@ -21,7 +21,6 @@ import (
"github.com/google/go-cmp/cmp"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/test/packetimpact/testbench"
@@ -45,12 +44,13 @@ func fragmentedICMPEchoRequest(t *testing.T, n *testbench.DUTTestNet, conn *test
icmpv6Header.SetCode(header.ICMPv6UnusedCode)
icmpv6Header.SetIdent(0)
icmpv6Header.SetSequence(0)
- cksum := header.ICMPv6Checksum(
- icmpv6Header,
- tcpip.Address(n.LocalIPv6),
- tcpip.Address(n.RemoteIPv6),
- buffer.NewVectorisedView(len(payload), []buffer.View{payload}),
- )
+ cksum := header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmpv6Header,
+ Src: tcpip.Address(n.LocalIPv6),
+ Dst: tcpip.Address(n.RemoteIPv6),
+ PayloadCsum: header.Checksum(payload, 0 /* initial */),
+ PayloadLen: len(payload),
+ })
icmpv6Header.SetChecksum(cksum)
icmpv6Bytes := append([]byte(icmpv6Header), payload...)
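
The ICMPv6 checksum calls above now take a params struct and a precomputed partial checksum of the payload instead of a vectorised view. A minimal sketch of the call shape, assuming the header, addresses and payload have already been built by the test:

    package example_test

    import (
        "gvisor.dev/gvisor/pkg/tcpip"
        "gvisor.dev/gvisor/pkg/tcpip/header"
    )

    // checksumAndSet computes the payload's partial checksum separately and
    // folds it into the ICMPv6 checksum via the params struct, then stores it
    // in the header.
    func checksumAndSet(icmp header.ICMPv6, src, dst tcpip.Address, payload []byte) {
        icmp.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
            Header:      icmp,
            Src:         src,
            Dst:         dst,
            PayloadCsum: header.Checksum(payload, 0 /* initial */),
            PayloadLen:  len(payload),
        }))
    }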
diff --git a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
index dd98ee7a1..db6195dc9 100644
--- a/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
+++ b/test/packetimpact/tests/ipv6_fragment_reassembly_test.go
@@ -22,7 +22,6 @@ import (
"github.com/google/go-cmp/cmp"
"gvisor.dev/gvisor/pkg/tcpip"
- "gvisor.dev/gvisor/pkg/tcpip/buffer"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/test/packetimpact/testbench"
)
@@ -121,12 +120,13 @@ func TestIPv6FragmentReassembly(t *testing.T) {
t.Fatalf("rand.Read: %s", err)
}
- cksum := header.ICMPv6Checksum(
- icmp,
- lIP,
- rIP,
- buffer.NewVectorisedView(len(originalPayload), []buffer.View{originalPayload}),
- )
+ cksum := header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
+ Header: icmp,
+ Src: lIP,
+ Dst: rIP,
+ PayloadCsum: header.Checksum(originalPayload, 0 /* initial */),
+ PayloadLen: len(originalPayload),
+ })
icmp.SetChecksum(cksum)
for _, fragment := range test.fragments {
diff --git a/test/packetimpact/tests/tcp_cork_mss_test.go b/test/packetimpact/tests/tcp_cork_mss_test.go
index a7ba5035e..1db3c9883 100644
--- a/test/packetimpact/tests/tcp_cork_mss_test.go
+++ b/test/packetimpact/tests/tcp_cork_mss_test.go
@@ -60,24 +60,24 @@ func TestTCPCorkMSS(t *testing.T) {
// Expect the segments to be coalesced and sent and capped to MSS.
expectedPayload := testbench.Payload{Bytes: expectedData[:mss]}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
// Expect the coalesced segment to be split and transmitted.
expectedPayload = testbench.Payload{Bytes: expectedData[mss:]}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
// Check for segments to *not* be held up because of TCP_CORK when
// the current send window is less than MSS.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(2 * len(sampleData)))})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(2 * len(sampleData)))})
dut.Send(t, acceptFD, sampleData, 0)
dut.Send(t, acceptFD, sampleData, 0)
expectedPayload = testbench.Payload{Bytes: append(sampleData, sampleData...)}
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &expectedPayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
}
diff --git a/test/packetimpact/tests/tcp_handshake_window_size_test.go b/test/packetimpact/tests/tcp_handshake_window_size_test.go
index 5d1266f3c..668e0275c 100644
--- a/test/packetimpact/tests/tcp_handshake_window_size_test.go
+++ b/test/packetimpact/tests/tcp_handshake_window_size_test.go
@@ -38,8 +38,8 @@ func TestTCPHandshakeWindowSize(t *testing.T) {
defer conn.Close(t)
// Start handshake with zero window size.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), WindowSize: testbench.Uint16(uint16(0))})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn), WindowSize: testbench.Uint16(uint16(0))})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK: %s", err)
}
// Update the advertised window size to a non-zero value with the ACK that
@@ -47,7 +47,7 @@ func TestTCPHandshakeWindowSize(t *testing.T) {
//
// Set the window size with MSB set and expect the dut to treat it as
// an unsigned value.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(1 << 15))})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(uint16(1 << 15))})
acceptFd, _ := dut.Accept(t, listenFD)
defer dut.Close(t, acceptFd)
@@ -59,7 +59,7 @@ func TestTCPHandshakeWindowSize(t *testing.T) {
// expect the dut to honor the recently advertised non-zero window
// and actually send out the data instead of probing for zero window.
dut.Send(t, acceptFd, sampleData, 0)
- if _, err := conn.ExpectNextData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload, time.Second); err != nil {
+ if _, err := conn.ExpectNextData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_info_test.go b/test/packetimpact/tests/tcp_info_test.go
index 69275e54b..3fc2c7fe5 100644
--- a/test/packetimpact/tests/tcp_info_test.go
+++ b/test/packetimpact/tests/tcp_info_test.go
@@ -51,7 +51,7 @@ func TestTCPInfo(t *testing.T) {
if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected a packet with payload %v: %s", samplePayload, err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
info := linux.TCPInfo{}
infoBytes := dut.GetSockOpt(t, acceptFD, unix.SOL_TCP, unix.TCP_INFO, int32(linux.SizeOfTCPInfo))
diff --git a/test/packetimpact/tests/tcp_linger_test.go b/test/packetimpact/tests/tcp_linger_test.go
index bc4b64388..88942904d 100644
--- a/test/packetimpact/tests/tcp_linger_test.go
+++ b/test/packetimpact/tests/tcp_linger_test.go
@@ -17,7 +17,6 @@ package tcp_linger_test
import (
"context"
"flag"
- "syscall"
"testing"
"time"
@@ -58,10 +57,10 @@ func TestTCPLingerZeroTimeout(t *testing.T) {
dut.Close(t, acceptFD)
// If the linger timeout is set to zero, the DUT should send a RST.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected RST-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
}
// TestTCPLingerOff tests when SO_LINGER is not set. DUT should send FIN-ACK
@@ -75,10 +74,10 @@ func TestTCPLingerOff(t *testing.T) {
dut.Close(t, acceptFD)
// If SO_LINGER is not set, DUT should send a FIN-ACK.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
}
// TestTCPLingerNonZeroTimeout tests when SO_LINGER is set with non-zero timeout.
@@ -115,10 +114,10 @@ func TestTCPLingerNonZeroTimeout(t *testing.T) {
t.Errorf("expected close to return within a second, but returned later")
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
})
}
}
@@ -166,10 +165,10 @@ func TestTCPLingerSendNonZeroTimeout(t *testing.T) {
t.Fatalf("expected a packet with payload %v: %s", samplePayload, err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
})
}
}
@@ -183,19 +182,19 @@ func TestTCPLingerShutdownZeroTimeout(t *testing.T) {
defer closeAll(t, dut, listenFD, conn)
dut.SetSockLingerOption(t, acceptFD, 0, true)
- dut.Shutdown(t, acceptFD, syscall.SHUT_RDWR)
+ dut.Shutdown(t, acceptFD, unix.SHUT_RDWR)
dut.Close(t, acceptFD)
// Shutdown will send FIN-ACK with read/write option.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
}
// If the linger timeout is set to zero, the DUT should send a RST.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected RST-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
}
// TestTCPLingerShutdownSendNonZeroTimeout tests SO_LINGER with shutdown() and
@@ -220,7 +219,7 @@ func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) {
sampleData := []byte("Sample Data")
dut.Send(t, acceptFD, sampleData, 0)
- dut.Shutdown(t, acceptFD, syscall.SHUT_RDWR)
+ dut.Shutdown(t, acceptFD, unix.SHUT_RDWR)
// Increase timeout as Close will take longer time to
// return when SO_LINGER is set with non-zero timeout.
@@ -243,10 +242,10 @@ func TestTCPLingerShutdownSendNonZeroTimeout(t *testing.T) {
t.Fatalf("expected a packet with payload %v: %s", samplePayload, err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected FIN-ACK packet within a second but got none: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
})
}
}
diff --git a/test/packetimpact/tests/tcp_network_unreachable_test.go b/test/packetimpact/tests/tcp_network_unreachable_test.go
index 53dc903e4..5168450ad 100644
--- a/test/packetimpact/tests/tcp_network_unreachable_test.go
+++ b/test/packetimpact/tests/tcp_network_unreachable_test.go
@@ -17,7 +17,6 @@ package tcp_synsent_reset_test
import (
"context"
"flag"
- "syscall"
"testing"
"time"
@@ -46,12 +45,12 @@ func TestTCPSynSentUnreachable(t *testing.T) {
defer cancel()
sa := unix.SockaddrInet4{Port: int(port)}
copy(sa.Addr[:], dut.Net.LocalIPv4)
- if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != unix.EINPROGRESS {
t.Errorf("got connect() = %v, want EINPROGRESS", err)
}
// Get the SYN.
- tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
+ tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second)
if err != nil {
t.Fatalf("expected SYN: %s", err)
}
@@ -100,12 +99,12 @@ func TestTCPSynSentUnreachable6(t *testing.T) {
ZoneId: dut.Net.RemoteDevID,
}
copy(sa.Addr[:], dut.Net.LocalIPv6)
- if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != syscall.Errno(unix.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(ctx, t, clientFD, &sa); err != unix.EINPROGRESS {
t.Errorf("got connect() = %v, want EINPROGRESS", err)
}
// Get the SYN.
- tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second)
+ tcpLayers, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second)
if err != nil {
t.Fatalf("expected SYN: %s", err)
}
@@ -156,7 +155,7 @@ func getConnectError(t *testing.T, dut *testbench.DUT, fd int32) error {
// failure).
dut.PollOne(t, fd, unix.POLLOUT, 10*time.Second)
if errno := dut.GetSockOptInt(t, fd, unix.SOL_SOCKET, unix.SO_ERROR); errno != 0 {
- return syscall.Errno(errno)
+ return unix.Errno(errno)
}
return nil
}
diff --git a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
index d2871df08..14eb7d93b 100644
--- a/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
+++ b/test/packetimpact/tests/tcp_noaccept_close_rst_test.go
@@ -40,7 +40,7 @@ func TestTcpNoAcceptCloseReset(t *testing.T) {
// it will only respond RST instead of RST+ACK.
dut.PollOne(t, listenFd, unix.POLLIN, time.Second)
dut.Close(t, listenFd)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)}, 1*time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)}, 1*time.Second); err != nil {
t.Fatalf("expected a RST-ACK packet but got none: %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_outside_the_window_test.go b/test/packetimpact/tests/tcp_outside_the_window_test.go
index 8909a348e..e442268be 100644
--- a/test/packetimpact/tests/tcp_outside_the_window_test.go
+++ b/test/packetimpact/tests/tcp_outside_the_window_test.go
@@ -37,7 +37,7 @@ func init() {
func TestTCPOutsideTheWindow(t *testing.T) {
for _, tt := range []struct {
description string
- tcpFlags uint8
+ tcpFlags header.TCPFlags
payload []testbench.Layer
seqNumOffset seqnum.Size
expectACK bool
@@ -76,11 +76,11 @@ func TestTCPOutsideTheWindow(t *testing.T) {
// to the AckNum.
localSeqNum := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
conn.Send(t, testbench.TCP{
- Flags: testbench.Uint8(tt.tcpFlags),
+ Flags: testbench.TCPFlags(tt.tcpFlags),
SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
}, tt.payload...)
timeout := 3 * time.Second
- gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
+ gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
if tt.expectACK && err != nil {
t.Fatalf("expected an ACK packet within %s but got none: %s", timeout, err)
}
@@ -93,11 +93,11 @@ func TestTCPOutsideTheWindow(t *testing.T) {
// has passed since the last ACK was sent.
t.Logf("sending another segment")
conn.Send(t, testbench.TCP{
- Flags: testbench.Uint8(tt.tcpFlags),
+ Flags: testbench.TCPFlags(tt.tcpFlags),
SeqNum: testbench.Uint32(uint32(conn.LocalSeqNum(t).Add(windowSize))),
}, tt.payload...)
timeout := 3 * time.Second
- gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
+ gotACK, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: localSeqNum}, timeout)
if err == nil {
t.Fatalf("expected no ACK packet but got one: %s", gotACK)
}
diff --git a/test/packetimpact/tests/tcp_paws_mechanism_test.go b/test/packetimpact/tests/tcp_paws_mechanism_test.go
index 24d9ef4ec..9054955ea 100644
--- a/test/packetimpact/tests/tcp_paws_mechanism_test.go
+++ b/test/packetimpact/tests/tcp_paws_mechanism_test.go
@@ -38,8 +38,8 @@ func TestPAWSMechanism(t *testing.T) {
options := make([]byte, header.TCPOptionTSLength)
header.EncodeTSOption(currentTS(), 0, options)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn), Options: options})
- synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn), Options: options})
+ synAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("didn't get synack during handshake: %s", err)
}
@@ -49,7 +49,7 @@ func TestPAWSMechanism(t *testing.T) {
}
tsecr := parsedSynOpts.TSVal
header.EncodeTSOption(currentTS(), tsecr, options)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options})
acceptFD, _ := dut.Accept(t, listenFD)
defer dut.Close(t, acceptFD)
@@ -60,9 +60,9 @@ func TestPAWSMechanism(t *testing.T) {
// every time we send one, it should not cause any flakiness because timestamps
// only need to be non-decreasing.
time.Sleep(3 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected an ACK but got none: %s", err)
}
@@ -85,9 +85,9 @@ func TestPAWSMechanism(t *testing.T) {
// 3ms here is chosen arbitrarily and this time.Sleep() should not cause flakiness
// due to the exact same reasoning discussed above.
time.Sleep(3 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), Options: options}, &testbench.Payload{Bytes: sampleData})
- gotTCP, err = conn.Expect(t, testbench.TCP{AckNum: lastAckNum, Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotTCP, err = conn.Expect(t, testbench.TCP{AckNum: lastAckNum, Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected segment with AckNum %d but got none: %s", lastAckNum, err)
}
diff --git a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go b/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go
index 7dd1c326a..1c8b72ebe 100644
--- a/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go
+++ b/test/packetimpact/tests/tcp_queue_send_recv_in_syn_sent_test.go
@@ -21,7 +21,6 @@ import (
"errors"
"flag"
"sync"
- "syscall"
"testing"
"time"
@@ -45,10 +44,10 @@ func TestQueueSendInSynSentHandshake(t *testing.T) {
sampleData := []byte("Sample Data")
dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
t.Fatalf("expected a SYN from DUT, but got none: %s", err)
}
@@ -85,19 +84,19 @@ func TestQueueSendInSynSentHandshake(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// Bring the connection to Established.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)})
// Expect the data from the DUT's enqueued send request.
//
// On Linux, this can be piggybacked with the ACK completing the
// handshake. On gVisor, getting such a piggyback is a bit more
// complicated because the actual data enqueuing occurs in the
// callers of endpoint Write.
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK from DUT, but got none: %s", err)
}
}
@@ -113,15 +112,15 @@ func TestQueueRecvInSynSentHandshake(t *testing.T) {
sampleData := []byte("Sample Data")
dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
t.Fatalf("expected a SYN from DUT, but got none: %s", err)
}
- if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != syscall.Errno(unix.EWOULDBLOCK) {
- t.Fatalf("expected error %s, got %s", syscall.Errno(unix.EWOULDBLOCK), err)
+ if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != unix.EWOULDBLOCK {
+ t.Fatalf("expected error %s, got %s", unix.EWOULDBLOCK, err)
}
// Test blocking read.
@@ -160,14 +159,14 @@ func TestQueueRecvInSynSentHandshake(t *testing.T) {
time.Sleep(100 * time.Millisecond)
// Bring the connection to Established.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK from DUT, but got none: %s", err)
}
// Send sample payload so that DUT can recv.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagPsh | header.TCPFlagAck)}, &testbench.Payload{Bytes: sampleData})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK from DUT, but got none: %s", err)
}
}
@@ -183,10 +182,10 @@ func TestQueueSendInSynSentRST(t *testing.T) {
sampleData := []byte("Sample Data")
dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
t.Fatalf("expected a SYN from DUT, but got none: %s", err)
}
@@ -207,8 +206,8 @@ func TestQueueSendInSynSentRST(t *testing.T) {
// Issue SEND call in SYN-SENT, this should be queued for
// process until the connection is established.
n, err := dut.SendWithErrno(ctx, t, socket, sampleData, 0)
- if err != syscall.Errno(unix.ECONNREFUSED) {
- t.Errorf("expected error %s, got %s", syscall.Errno(unix.ECONNREFUSED), err)
+ if err != unix.ECONNREFUSED {
+ t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err)
}
if n != -1 {
t.Errorf("expected return value %d, got %d", -1, n)
@@ -224,7 +223,7 @@ func TestQueueSendInSynSentRST(t *testing.T) {
// request and the system actually being blocked.
time.Sleep(100 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
}
// TestQueueRecvInSynSentRST tests recv behavior when the TCP state
@@ -238,15 +237,15 @@ func TestQueueRecvInSynSentRST(t *testing.T) {
sampleData := []byte("Sample Data")
dut.SetNonBlocking(t, socket, true)
- if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, syscall.EINPROGRESS) {
+ if _, err := dut.ConnectWithErrno(context.Background(), t, socket, conn.LocalAddr(t)); !errors.Is(err, unix.EINPROGRESS) {
t.Fatalf("failed to bring DUT to SYN-SENT, got: %s, want EINPROGRESS", err)
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, time.Second); err != nil {
t.Fatalf("expected a SYN from DUT, but got none: %s", err)
}
- if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != syscall.Errno(unix.EWOULDBLOCK) {
- t.Fatalf("expected error %s, got %s", syscall.Errno(unix.EWOULDBLOCK), err)
+ if _, _, err := dut.RecvWithErrno(context.Background(), t, socket, int32(len(sampleData)), 0); err != unix.EWOULDBLOCK {
+ t.Fatalf("expected error %s, got %s", unix.EWOULDBLOCK, err)
}
// Test blocking read.
@@ -266,8 +265,8 @@ func TestQueueRecvInSynSentRST(t *testing.T) {
// Issue RECEIVE call in SYN-SENT, this should be queued for
// process until the connection is established.
n, _, err := dut.RecvWithErrno(ctx, t, socket, int32(len(sampleData)), 0)
- if err != syscall.Errno(unix.ECONNREFUSED) {
- t.Errorf("expected error %s, got %s", syscall.Errno(unix.ECONNREFUSED), err)
+ if err != unix.ECONNREFUSED {
+ t.Errorf("expected error %s, got %s", unix.ECONNREFUSED, err)
}
if n != -1 {
t.Errorf("expected return value %d, got %d", -1, n)
@@ -283,5 +282,5 @@ func TestQueueRecvInSynSentRST(t *testing.T) {
// request and the system actually being blocked.
time.Sleep(100 * time.Millisecond)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
}
diff --git a/test/packetimpact/tests/tcp_rack_test.go b/test/packetimpact/tests/tcp_rack_test.go
index ef902c54d..0a5b0f12b 100644
--- a/test/packetimpact/tests/tcp_rack_test.go
+++ b/test/packetimpact/tests/tcp_rack_test.go
@@ -97,7 +97,7 @@ func sendAndReceive(t *testing.T, dut testbench.DUT, conn testbench.TCPIPv4, num
if sendACK {
time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(sn))})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(sn))})
}
}
return lastSent
@@ -149,7 +149,7 @@ func TestRACKTLPLost(t *testing.T) {
// Cumulative ACK for #[1-5] packets.
ackNum := seqNum1.Add(seqnum.Size(6 * payloadSize))
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(ackNum))})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(ackNum))})
// Probe Timeout (PTO) should be two times RTT. Check that the last
// packet is retransmitted after probe timeout.
@@ -194,7 +194,7 @@ func TestRACKWithSACK(t *testing.T) {
sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
start, end,
}}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
rtt, _ := getRTTAndRTO(t, dut, acceptFd)
timeout := 2 * rtt
@@ -206,7 +206,7 @@ func TestRACKWithSACK(t *testing.T) {
time.Sleep(simulatedRTT)
// ACK for #1 packet.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(end))})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(end))})
// RACK considers transmission times of the packets to mark them lost.
// As the 3rd packet was sent before the retransmitted 1st packet, RACK
@@ -243,7 +243,7 @@ func TestRACKWithoutReorder(t *testing.T) {
start, end,
}}, sackBlock[sbOff:])
time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
// RACK marks #1 and #2 packets as lost and retransmits both after
// RTT + reorderWindow. The reorderWindow initially will be a small
@@ -289,7 +289,7 @@ func TestRACKWithReorder(t *testing.T) {
sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
start, end,
}}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
}
// Send a DSACK block indicating both original and retransmitted
@@ -304,7 +304,7 @@ func TestRACKWithReorder(t *testing.T) {
dbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
start, end,
}}, dsackBlock[dbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1 + numPkts*payloadSize)), Options: dsackBlock[:dbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1 + numPkts*payloadSize)), Options: dsackBlock[:dbOff]})
seqNum1.UpdateForward(seqnum.Size(numPkts * payloadSize))
sendTime := time.Now()
@@ -321,7 +321,7 @@ func TestRACKWithReorder(t *testing.T) {
sbOff += header.EncodeSACKBlocks([]header.SACKBlock{{
start, end,
}}, sackBlock[sbOff:])
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
// Expect the retransmission of #1 packet after RTT+ReorderWindow.
if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, time.Second); err != nil {
@@ -361,7 +361,7 @@ func TestRACKWithLostRetransmission(t *testing.T) {
start, end,
}}, sackBlock[sbOff:])
time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock[:sbOff]})
// RACK marks #1 packet as lost and retransmits it after
// RTT + reorderWindow. The reorderWindow is bounded between a small
@@ -394,7 +394,7 @@ func TestRACKWithLostRetransmission(t *testing.T) {
start, end,
}}, sackBlock1[sbOff1:])
time.Sleep(simulatedRTT)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock1[:sbOff1]})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: testbench.Uint32(uint32(seqNum1)), Options: sackBlock1[:sbOff1]})
// Expect re-retransmission of #1 packet without entering an RTO.
if _, err := conn.Expect(t, testbench.TCP{SeqNum: testbench.Uint32(uint32(seqNum1))}, timeout); err != nil {
diff --git a/test/packetimpact/tests/tcp_rcv_buf_space_test.go b/test/packetimpact/tests/tcp_rcv_buf_space_test.go
index d6ad5cda6..f121d44eb 100644
--- a/test/packetimpact/tests/tcp_rcv_buf_space_test.go
+++ b/test/packetimpact/tests/tcp_rcv_buf_space_test.go
@@ -17,7 +17,6 @@ package tcp_rcv_buf_space_test
import (
"context"
"flag"
- "syscall"
"testing"
"golang.org/x/sys/unix"
@@ -61,7 +60,7 @@ func TestReduceRecvBuf(t *testing.T) {
payloadBytes = l
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, []testbench.Layer{&testbench.Payload{Bytes: payload[:payloadBytes]}}...)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, []testbench.Layer{&testbench.Payload{Bytes: payload[:payloadBytes]}}...)
payload = payload[payloadBytes:]
}
@@ -73,7 +72,7 @@ func TestReduceRecvBuf(t *testing.T) {
// Second read should return EAGAIN as the last segment should have been
// dropped due to it exceeding the receive buffer space available in the
// socket.
- if ret, got, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), syscall.MSG_DONTWAIT); got != nil || ret != -1 || err != syscall.EAGAIN {
+ if ret, got, err := dut.RecvWithErrno(context.Background(), t, acceptFd, int32(len(sampleData)), unix.MSG_DONTWAIT); got != nil || ret != -1 || err != unix.EAGAIN {
t.Fatalf("expected no packets but got: %s", got)
}
}
diff --git a/test/packetimpact/tests/tcp_retransmits_test.go b/test/packetimpact/tests/tcp_retransmits_test.go
index ba79fbf55..c2611c2a6 100644
--- a/test/packetimpact/tests/tcp_retransmits_test.go
+++ b/test/packetimpact/tests/tcp_retransmits_test.go
@@ -66,7 +66,7 @@ func TestRetransmits(t *testing.T) {
// Give a chance for the dut to estimate RTO with RTT from the DATA-ACK.
// TODO(gvisor.dev/issue/2685) Estimate RTO during handshake, after which
// we can skip sending this ACK.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
const timeoutCorrection = time.Second
const diffCorrection = time.Millisecond
diff --git a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
index 418393796..64b7288fb 100644
--- a/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
+++ b/test/packetimpact/tests/tcp_send_window_sizes_piggyback_test.go
@@ -71,7 +71,7 @@ func TestSendWindowSizesPiggyback(t *testing.T) {
dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_NODELAY, 1)
- expectedTCP := testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}
+ expectedTCP := testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}
dut.Send(t, acceptFd, sampleData, 0)
expectedPayload := testbench.Payload{Bytes: tt.expectedPayload1}
@@ -90,7 +90,7 @@ func TestSendWindowSizesPiggyback(t *testing.T) {
// Send ACK for the previous segment along with data for the dut to
// receive and ACK back. Sending this ACK would make room for the dut
// to transmit any enqueued segment.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(tt.windowSize)}, &testbench.Payload{Bytes: sampleData})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(tt.windowSize)}, &testbench.Payload{Bytes: sampleData})
// Expect the dut to piggyback the ACK for received data along with
// the segment enqueued for transmit.
diff --git a/test/packetimpact/tests/tcp_synrcvd_reset_test.go b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
index 32271d7b2..3346d43c4 100644
--- a/test/packetimpact/tests/tcp_synrcvd_reset_test.go
+++ b/test/packetimpact/tests/tcp_synrcvd_reset_test.go
@@ -37,11 +37,11 @@ func TestTCPSynRcvdReset(t *testing.T) {
defer conn.Close(t)
// Expect dut connection to have transitioned to SYN-RCVD state.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
// Expect the connection to have transitioned SYN-RCVD to CLOSED.
//
@@ -49,8 +49,8 @@ func TestTCPSynRcvdReset(t *testing.T) {
// CLOSED. We cannot use TCP_INFO to lookup the state as this is a passive
// DUT connection.
for i := 0; i < 5; i++ {
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Logf("retransmit%d ACK as we did not get the expected RST, %s", i, err)
continue
}
diff --git a/test/packetimpact/tests/tcp_synsent_reset_test.go b/test/packetimpact/tests/tcp_synsent_reset_test.go
index 2c8bb101b..cccb0abc6 100644
--- a/test/packetimpact/tests/tcp_synsent_reset_test.go
+++ b/test/packetimpact/tests/tcp_synsent_reset_test.go
@@ -42,7 +42,7 @@ func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, uint16,
copy(sa.Addr[:], dut.Net.LocalIPv4)
// Bring the dut to SYN-SENT state with a non-blocking connect.
dut.Connect(t, clientFD, &sa)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)}, nil, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN\n")
}
@@ -53,11 +53,11 @@ func dutSynSentState(t *testing.T) (*testbench.DUT, *testbench.TCPIPv4, uint16,
func TestTCPSynSentReset(t *testing.T) {
_, conn, _, _ := dutSynSentState(t)
defer conn.Close(t)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst | header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst | header.TCPFlagAck)})
// Expect the connection to have closed.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
@@ -73,15 +73,15 @@ func TestTCPSynSentRcvdReset(t *testing.T) {
// Initiate new SYN connection with the same port pair
// (simultaneous open case), expect the dut connection to move to
// SYN-RCVD state
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagSyn | header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected SYN-ACK %s\n", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
// Expect the connection to have transitioned SYN-RCVD to CLOSED.
// TODO(gvisor.dev/issue/478): Check for TCP_INFO on the dut side.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
diff --git a/test/packetimpact/tests/tcp_timewait_reset_test.go b/test/packetimpact/tests/tcp_timewait_reset_test.go
index d1d2fb83d..89037f0a4 100644
--- a/test/packetimpact/tests/tcp_timewait_reset_test.go
+++ b/test/packetimpact/tests/tcp_timewait_reset_test.go
@@ -42,26 +42,26 @@ func TestTimeWaitReset(t *testing.T) {
// Trigger active close.
dut.Close(t, acceptFD)
- _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
+ _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected a FIN: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
// Send a FIN, DUT should transition to TIME_WAIT from FIN_WAIT2.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK for our FIN: %s", err)
}
// Send a RST, the DUT should transition to CLOSED from TIME_WAIT.
// This is the default Linux behavior, it can be changed to ignore RSTs via
// sysctl net.ipv4.tcp_rfc1337.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)})
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
// The DUT should reply with RST to our ACK as the state should have
// transitioned to CLOSED.
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
t.Fatalf("expected a RST: %s", err)
}
}
diff --git a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
index ea962c818..92b54b72e 100644
--- a/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
+++ b/test/packetimpact/tests/tcp_unacc_seq_ack_test.go
@@ -17,7 +17,6 @@ package tcp_unacc_seq_ack_test
import (
"flag"
"fmt"
- "syscall"
"testing"
"time"
@@ -59,8 +58,8 @@ func TestEstablishedUnaccSeqAck(t *testing.T) {
sampleData := []byte("Sample Data")
samplePayload := &testbench.Payload{Bytes: sampleData}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected ack %s", err)
}
@@ -74,7 +73,7 @@ func TestEstablishedUnaccSeqAck(t *testing.T) {
// ACK matches the TCP layer state.
*conn.LocalSeqNum(t) = origSeq
}
- gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if tt.expectAck && err != nil {
t.Fatalf("expected an ack but got none: %s", err)
}
@@ -110,8 +109,8 @@ func TestPassiveCloseUnaccSeqAck(t *testing.T) {
acceptFD, _ := dut.Accept(t, listenFD)
// Send a FIN to DUT to initiate the passive close.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)})
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagFin)})
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected an ACK for our fin and DUT should enter CLOSE_WAIT: %s", err)
}
@@ -122,7 +121,7 @@ func TestPassiveCloseUnaccSeqAck(t *testing.T) {
// Send a segment with OTW Seq / unacc ACK.
conn.Send(t, tt.makeTestingTCP(t, &conn, tt.seqNumOffset, windowSize), samplePayload)
- gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ gotAck, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if tt.expectAck && err != nil {
t.Errorf("expected an ack but got none: %s", err)
}
@@ -132,14 +131,14 @@ func TestPassiveCloseUnaccSeqAck(t *testing.T) {
// Now let's verify DUT is indeed in CLOSE_WAIT
dut.Close(t, acceptFD)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagFin)}, time.Second); err != nil {
t.Fatalf("expected DUT to send a FIN: %s", err)
}
// Ack the FIN from DUT
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
// Send some extra data to DUT
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, samplePayload)
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, samplePayload)
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, time.Second); err != nil {
t.Fatalf("expected DUT to send an RST: %s", err)
}
})
@@ -171,14 +170,14 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {
acceptFD, _ := dut.Accept(t, listenFD)
// Trigger active close.
- dut.Shutdown(t, acceptFD, syscall.SHUT_WR)
+ dut.Shutdown(t, acceptFD, unix.SHUT_WR)
// Get to FIN_WAIT2
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected a FIN: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
sendUnaccSeqAck := func(state string) {
t.Helper()
@@ -193,7 +192,7 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {
// incoming ACK matches the TCP layer state.
*conn.LocalSeqNum(t) = origSeq
}
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Errorf("expected an ack in %s state, but got none: %s", state, err)
}
}
@@ -201,8 +200,8 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {
sendUnaccSeqAck("FIN_WAIT2")
// Send a FIN to DUT to get to TIME_WAIT
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagFin | header.TCPFlagAck)})
- if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagFin | header.TCPFlagAck)})
+ if _, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second); err != nil {
t.Fatalf("expected an ACK for our fin and DUT should enter TIME_WAIT: %s", err)
}
@@ -218,7 +217,7 @@ func TestActiveCloseUnaccpSeqAck(t *testing.T) {
func generateOTWSeqSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
lastAcceptable := conn.LocalSeqNum(t).Add(windowSize)
otwSeq := uint32(lastAcceptable.Add(seqNumOffset))
- return testbench.TCP{SeqNum: testbench.Uint32(otwSeq), Flags: testbench.Uint8(header.TCPFlagAck)}
+ return testbench.TCP{SeqNum: testbench.Uint32(otwSeq), Flags: testbench.TCPFlags(header.TCPFlagAck)}
}
// generateUnaccACKSegment generates a segment with
@@ -227,5 +226,5 @@ func generateOTWSeqSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset s
func generateUnaccACKSegment(t *testing.T, conn *testbench.TCPIPv4, seqNumOffset seqnum.Size, windowSize seqnum.Size) testbench.TCP {
lastAcceptable := conn.RemoteSeqNum(t)
unaccAck := uint32(lastAcceptable.Add(seqNumOffset))
- return testbench.TCP{AckNum: testbench.Uint32(unaccAck), Flags: testbench.Uint8(header.TCPFlagAck)}
+ return testbench.TCP{AckNum: testbench.Uint32(unaccAck), Flags: testbench.TCPFlags(header.TCPFlagAck)}
}
diff --git a/test/packetimpact/tests/tcp_user_timeout_test.go b/test/packetimpact/tests/tcp_user_timeout_test.go
index b16e65366..ef38bd738 100644
--- a/test/packetimpact/tests/tcp_user_timeout_test.go
+++ b/test/packetimpact/tests/tcp_user_timeout_test.go
@@ -35,7 +35,7 @@ func sendPayload(t *testing.T, conn *testbench.TCPIPv4, dut *testbench.DUT, fd i
}
conn.Drain(t)
dut.Send(t, fd, sampleData, 0)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, &testbench.Payload{Bytes: sampleData}, time.Second); err != nil {
t.Fatalf("expected data but got none: %w", err)
}
}
@@ -79,14 +79,14 @@ func TestTCPUserTimeout(t *testing.T) {
time.Sleep(tt.sendDelay)
conn.Drain(t)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
// If TCP_USER_TIMEOUT was set and the above delay was longer than the
// TCP_USER_TIMEOUT then the DUT should send a RST in response to the
// testbench's packet.
expectRST := tt.userTimeout != 0 && tt.sendDelay > tt.userTimeout
expectTimeout := 5 * time.Second
- got, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, expectTimeout)
+ got, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, expectTimeout)
if expectRST && err != nil {
t.Errorf("expected RST packet within %s but got none: %s", expectTimeout, err)
}
diff --git a/test/packetimpact/tests/tcp_window_shrink_test.go b/test/packetimpact/tests/tcp_window_shrink_test.go
index 093484721..0d65a2ea2 100644
--- a/test/packetimpact/tests/tcp_window_shrink_test.go
+++ b/test/packetimpact/tests/tcp_window_shrink_test.go
@@ -48,7 +48,7 @@ func TestWindowShrink(t *testing.T) {
if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
dut.Send(t, acceptFd, sampleData, 0)
dut.Send(t, acceptFd, sampleData, 0)
@@ -59,7 +59,7 @@ func TestWindowShrink(t *testing.T) {
t.Fatalf("expected payload was not received: %s", err)
}
// We close our receiving window here
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
dut.Send(t, acceptFd, []byte("Sample Data"), 0)
// Note: There is another kind of zero-window probing which Windows uses (by sending one
diff --git a/test/packetimpact/tests/tcp_zero_receive_window_test.go b/test/packetimpact/tests/tcp_zero_receive_window_test.go
index d06690705..d73495454 100644
--- a/test/packetimpact/tests/tcp_zero_receive_window_test.go
+++ b/test/packetimpact/tests/tcp_zero_receive_window_test.go
@@ -49,8 +49,8 @@ func TestZeroReceiveWindow(t *testing.T) {
// Expect the DUT to eventually advertise zero receive window.
// The test would timeout otherwise.
for readOnce := false; ; {
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -100,8 +100,8 @@ func TestNonZeroReceiveWindow(t *testing.T) {
// we sent. Once we have received ACKs with non-zero receive windows, we break
// the loop.
for {
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, time.Second)
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ gotTCP, err := conn.Expect(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, time.Second)
if err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
index d094c10eb..22b17a39e 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_retransmit_test.go
@@ -15,6 +15,7 @@
package tcp_zero_window_probe_retransmit_test
import (
+ "bytes"
"flag"
"testing"
"time"
@@ -51,18 +52,23 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {
if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
- t.Fatalf("expected packet was not received: %s", err)
- }
// Check for the dut to keep the connection alive as long as the zero window
// probes are acknowledged. Check if the zero window probes are sent at
// exponentially increasing intervals. The timeout intervals are function
// of the recorded first zero probe transmission duration.
//
- // Advertize zero receive window again.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ // Advertize zero receive window along with a payload.
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh), WindowSize: testbench.Uint16(0)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ t.Fatalf("expected packet was not received: %s", err)
+ }
+ // Wait for the payload to be received by the DUT, which also indicates
+ // that the DUT has received the peer's window advertisement.
+ if got := dut.Recv(t, acceptFd, int32(len(sampleData)), 0); !bytes.Equal(got, sampleData) {
+ t.Fatalf("got dut.Recv(t, %d, %d, 0) = %s, want %s", acceptFd, len(sampleData), got, sampleData)
+ }
+
probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
ackProbe := testbench.Uint32(uint32(*conn.RemoteSeqNum(t)))
@@ -98,10 +104,10 @@ func TestZeroWindowProbeRetransmit(t *testing.T) {
}
prev = got
// Acknowledge the zero-window probes from the dut.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
}
// Advertize non-zero window.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck)})
// Expect the dut to recover and transmit data.
if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_test.go b/test/packetimpact/tests/tcp_zero_window_probe_test.go
index 650a569cc..8b90fcbe9 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_test.go
@@ -53,8 +53,8 @@ func TestZeroWindowProbe(t *testing.T) {
t.Fatalf("expected payload was not received: %s", err)
}
sendTime := time.Now().Sub(start)
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -62,7 +62,7 @@ func TestZeroWindowProbe(t *testing.T) {
// probe to be sent.
//
// Advertize zero window to the dut.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Expected sequence number of the zero window probe.
probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
@@ -93,7 +93,7 @@ func TestZeroWindowProbe(t *testing.T) {
// and sends out the sample payload after the send window opens.
//
// Advertize non-zero window to the dut and ack the zero window probe.
- conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.Uint8(header.TCPFlagAck)})
+ conn.Send(t, testbench.TCP{AckNum: ackProbe, Flags: testbench.TCPFlags(header.TCPFlagAck)})
// Expect the dut to recover and transmit data.
if _, err := conn.ExpectData(t, &testbench.TCP{SeqNum: ackProbe}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
@@ -104,8 +104,8 @@ func TestZeroWindowProbe(t *testing.T) {
// Basically with sequence number to one byte behind the unacknowledged
// sequence number.
p := testbench.Uint32(uint32(*conn.LocalSeqNum(t)))
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1))})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), AckNum: p}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), SeqNum: testbench.Uint32(uint32(*conn.LocalSeqNum(t) - 1))})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), AckNum: p}, nil, time.Second); err != nil {
t.Fatalf("expected a packet with ack number: %d: %s", p, err)
}
}
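The probe sequence numbers above follow the usual convention: a zero-window probe starts one byte below the receiver's next expected sequence number, so it delivers no new data and only solicits an ACK that re-advertises the window. A small standalone sketch of that check, assuming plain uint32 sequence arithmetic rather than the testbench's seqnum types:

package main

import "fmt"

// isZeroWindowProbe reports whether a segment's sequence number matches the
// probe convention used in these tests: the probe is sent one byte below the
// receiver's next expected sequence number (rcvNxt).
func isZeroWindowProbe(seq, rcvNxt uint32) bool {
	return seq == rcvNxt-1
}

func main() {
	const rcvNxt uint32 = 1000
	fmt.Println(isZeroWindowProbe(999, rcvNxt))  // true: zero-window probe
	fmt.Println(isZeroWindowProbe(1000, rcvNxt)) // false: ordinary in-sequence data
}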
diff --git a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
index 079fea68c..1ce4d22b7 100644
--- a/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
+++ b/test/packetimpact/tests/tcp_zero_window_probe_usertimeout_test.go
@@ -51,8 +51,8 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
if _, err := conn.ExpectData(t, &testbench.TCP{}, samplePayload, time.Second); err != nil {
t.Fatalf("expected payload was not received: %s", err)
}
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck | header.TCPFlagPsh)}, samplePayload)
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)}, nil, time.Second); err != nil {
t.Fatalf("expected packet was not received: %s", err)
}
@@ -60,7 +60,7 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
// probe to be sent.
//
// Advertize zero window to the dut.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Expected sequence number of the zero window probe.
probeSeq := testbench.Uint32(uint32(*conn.RemoteSeqNum(t) - 1))
@@ -81,7 +81,7 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
// Reduce the retransmit timeout.
dut.SetSockOptInt(t, acceptFd, unix.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, int32(startProbeDuration.Milliseconds()))
// Advertize zero window again.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck), WindowSize: testbench.Uint16(0)})
// Ask the dut to send out data that would trigger zero window probe retransmissions.
dut.Send(t, acceptFd, sampleData, 0)
@@ -90,8 +90,8 @@ func TestZeroWindowProbeUserTimeout(t *testing.T) {
// Expect the connection to have timed out and closed which would cause the dut
// to reply with a RST to the ACK we send.
- conn.Send(t, testbench.TCP{Flags: testbench.Uint8(header.TCPFlagAck)})
- if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.Uint8(header.TCPFlagRst)}, nil, time.Second); err != nil {
+ conn.Send(t, testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagAck)})
+ if _, err := conn.ExpectData(t, &testbench.TCP{Flags: testbench.TCPFlags(header.TCPFlagRst)}, nil, time.Second); err != nil {
t.Fatalf("expected a TCP RST")
}
}
diff --git a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
index 52c6f9d91..f63cfcc9a 100644
--- a/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
+++ b/test/packetimpact/tests/udp_discard_mcast_source_addr_test.go
@@ -19,7 +19,6 @@ import (
"flag"
"fmt"
"net"
- "syscall"
"testing"
"golang.org/x/sys/unix"
@@ -56,7 +55,7 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV4(t *testing.T) {
)
ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
- if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {
+ if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK {
t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
}
})
@@ -86,7 +85,7 @@ func TestDiscardsUDPPacketsWithMcastSourceAddressV6(t *testing.T) {
&testbench.Payload{Bytes: []byte("test payload")},
)
ret, payload, errno := dut.RecvWithErrno(context.Background(), t, remoteFD, 100, 0)
- if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {
+ if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK {
t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, payload, errno)
}
})
diff --git a/test/packetimpact/tests/udp_icmp_error_propagation_test.go b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
index 3fca8c7a3..3159d5b89 100644
--- a/test/packetimpact/tests/udp_icmp_error_propagation_test.go
+++ b/test/packetimpact/tests/udp_icmp_error_propagation_test.go
@@ -20,7 +20,6 @@ import (
"fmt"
"net"
"sync"
- "syscall"
"testing"
"time"
@@ -86,16 +85,16 @@ type testData struct {
remotePort uint16
cleanFD int32
cleanPort uint16
- wantErrno syscall.Errno
+ wantErrno unix.Errno
}
// wantErrno computes the errno to expect given the connection mode of a UDP
// socket and the ICMP error it will receive.
-func wantErrno(c connectionMode, icmpErr icmpError) syscall.Errno {
+func wantErrno(c connectionMode, icmpErr icmpError) unix.Errno {
if c && icmpErr == portUnreachable {
- return syscall.Errno(unix.ECONNREFUSED)
+ return unix.ECONNREFUSED
}
- return syscall.Errno(0)
+ return unix.Errno(0)
}
// sendICMPError sends an ICMP error message in response to a UDP datagram.
@@ -123,7 +122,7 @@ func sendICMPError(t *testing.T, conn *testbench.UDPIPv4, icmpErr icmpError, udp
conn.SendFrameStateless(t, layers)
}
-// testRecv tests observing the ICMP error through the recv syscall. A packet
+// testRecv tests observing the ICMP error through the recv system call. A packet
// is sent to the DUT, and if wantErrno is non-zero, then the first recv should
// fail and the second should succeed. Otherwise if wantErrno is zero then the
// first recv should succeed immediately.
@@ -136,7 +135,7 @@ func testRecv(ctx context.Context, t *testing.T, d testData) {
d.conn.Send(t, testbench.UDP{})
- if d.wantErrno != syscall.Errno(0) {
+ if d.wantErrno != unix.Errno(0) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
ret, _, err := d.dut.RecvWithErrno(ctx, t, d.remoteFD, 100, 0)
@@ -162,7 +161,7 @@ func testSendTo(ctx context.Context, t *testing.T, d testData) {
t.Fatalf("did not receive UDP packet from clean socket on DUT: %s", err)
}
- if d.wantErrno != syscall.Errno(0) {
+ if d.wantErrno != unix.Errno(0) {
ctx, cancel := context.WithTimeout(ctx, time.Second)
defer cancel()
ret, err := d.dut.SendToWithErrno(ctx, t, d.remoteFD, nil, 0, d.conn.LocalAddr(t))
@@ -183,11 +182,11 @@ func testSendTo(ctx context.Context, t *testing.T, d testData) {
func testSockOpt(_ context.Context, t *testing.T, d testData) {
// Check that there's no pending error on the clean socket.
- if errno := syscall.Errno(d.dut.GetSockOptInt(t, d.cleanFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != syscall.Errno(0) {
+ if errno := unix.Errno(d.dut.GetSockOptInt(t, d.cleanFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != unix.Errno(0) {
t.Fatalf("unexpected error (%[1]d) %[1]v on clean socket", errno)
}
- if errno := syscall.Errno(d.dut.GetSockOptInt(t, d.remoteFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != d.wantErrno {
+ if errno := unix.Errno(d.dut.GetSockOptInt(t, d.remoteFD, unix.SOL_SOCKET, unix.SO_ERROR)); errno != d.wantErrno {
t.Fatalf("SO_ERROR sockopt after ICMP error is (%[1]d) %[1]v, expected (%[2]d) %[2]v", errno, d.wantErrno)
}
@@ -310,7 +309,7 @@ func TestICMPErrorDuringUDPRecv(t *testing.T) {
go func() {
defer wg.Done()
- if wantErrno != syscall.Errno(0) {
+ if wantErrno != unix.Errno(0) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
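The SO_ERROR assertions above rely on converting the integer returned by getsockopt into unix.Errno so it can be compared against constants and printed as text. A tiny standalone illustration of that conversion; the numeric value is hard-coded for the example only:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// getsockopt(SO_ERROR) yields the queued error as a plain integer;
	// converting it to unix.Errno makes it comparable with constants such
	// as unix.ECONNREFUSED and printable as human-readable text.
	soError := 111 // ECONNREFUSED on Linux/amd64; illustrative value only
	errno := unix.Errno(soError)
	fmt.Println(errno == unix.ECONNREFUSED, errno) // true connection refused
}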
diff --git a/test/packetimpact/tests/udp_send_recv_dgram_test.go b/test/packetimpact/tests/udp_send_recv_dgram_test.go
index 894d156cf..230b012c7 100644
--- a/test/packetimpact/tests/udp_send_recv_dgram_test.go
+++ b/test/packetimpact/tests/udp_send_recv_dgram_test.go
@@ -19,7 +19,6 @@ import (
"flag"
"fmt"
"net"
- "syscall"
"testing"
"time"
@@ -241,7 +240,7 @@ func TestUDP(t *testing.T) {
},
)
ret, recvPayload, errno := dut.RecvWithErrno(context.Background(), t, socketFD, 100, 0)
- if errno != syscall.EAGAIN || errno != syscall.EWOULDBLOCK {
+ if errno != unix.EAGAIN || errno != unix.EWOULDBLOCK {
t.Errorf("Recv got unexpected result, ret=%d, payload=%q, errno=%s", ret, recvPayload, errno)
}
}
diff --git a/test/perf/BUILD b/test/perf/BUILD
index e25f090ae..ed899ac22 100644
--- a/test/perf/BUILD
+++ b/test/perf/BUILD
@@ -1,4 +1,3 @@
-load("//tools:defs.bzl", "more_shards")
load("//test/runner:defs.bzl", "syscall_test")
package(licenses = ["notice"])
@@ -38,7 +37,6 @@ syscall_test(
syscall_test(
size = "enormous",
debug = False,
- shard_count = more_shards,
tags = ["nogotsan"],
test = "//test/perf/linux:getdents_benchmark",
)
diff --git a/test/runner/gtest/gtest.go b/test/runner/gtest/gtest.go
index 38e57d62f..2ad5f58ef 100644
--- a/test/runner/gtest/gtest.go
+++ b/test/runner/gtest/gtest.go
@@ -35,6 +35,39 @@ var (
filterBenchmarkFlag = "--benchmark_filter"
)
+// BuildTestArgs builds arguments to be passed to the test binary to execute
+// only the test cases in `indices`.
+func BuildTestArgs(indices []int, testCases []TestCase) []string {
+ var testFilter, benchFilter string
+ for _, tci := range indices {
+ tc := testCases[tci]
+ if tc.all {
+ // No argument will make all tests run.
+ return nil
+ }
+ if tc.benchmark {
+ if len(benchFilter) > 0 {
+ benchFilter += "|"
+ }
+ benchFilter += "^" + tc.Name + "$"
+ } else {
+ if len(testFilter) > 0 {
+ testFilter += ":"
+ }
+ testFilter += tc.FullName()
+ }
+ }
+
+ var args []string
+ if len(testFilter) > 0 {
+ args = append(args, fmt.Sprintf("%s=%s", filterTestFlag, testFilter))
+ }
+ if len(benchFilter) > 0 {
+ args = append(args, fmt.Sprintf("%s=%s", filterBenchmarkFlag, benchFilter))
+ }
+ return args
+}
+
// TestCase is a single gtest test case.
type TestCase struct {
// Suite is the suite for this test.
@@ -59,22 +92,6 @@ func (tc TestCase) FullName() string {
return fmt.Sprintf("%s.%s", tc.Suite, tc.Name)
}
-// Args returns arguments to be passed when invoking the test.
-func (tc TestCase) Args() []string {
- if tc.all {
- return []string{} // No arguments.
- }
- if tc.benchmark {
- return []string{
- fmt.Sprintf("%s=^%s$", filterBenchmarkFlag, tc.Name),
- fmt.Sprintf("%s=", filterTestFlag),
- }
- }
- return []string{
- fmt.Sprintf("%s=%s", filterTestFlag, tc.FullName()),
- }
-}
-
// ParseTestCases calls a gtest test binary to list its test and returns a
// slice with the name and suite of each test.
//
@@ -90,6 +107,7 @@ func ParseTestCases(testBin string, benchmarks bool, extraArgs ...string) ([]Tes
// We failed to list tests with the given flags. Just
// return something that will run the binary with no
// flags, which should execute all tests.
+ fmt.Printf("failed to get test list: %v\n", err)
return []TestCase{
{
Suite: "Default",
diff --git a/test/runner/runner.go b/test/runner/runner.go
index e72c59200..a8a134fe2 100644
--- a/test/runner/runner.go
+++ b/test/runner/runner.go
@@ -26,7 +26,6 @@ import (
"path/filepath"
"strings"
"syscall"
- "testing"
"time"
specs "github.com/opencontainers/runtime-spec/specs-go"
@@ -57,13 +56,82 @@ var (
leakCheck = flag.Bool("leak-check", false, "check for reference leaks")
)
+func main() {
+ flag.Parse()
+ if flag.NArg() != 1 {
+ fatalf("test must be provided")
+ }
+
+ log.SetLevel(log.Info)
+ if *debug {
+ log.SetLevel(log.Debug)
+ }
+
+ if *platform != "native" && *runscPath == "" {
+ if err := testutil.ConfigureExePath(); err != nil {
+ panic(err.Error())
+ }
+ *runscPath = specutils.ExePath
+ }
+
+ // Make sure stdout and stderr are opened with O_APPEND, otherwise logs
+ // from outside the sandbox can (and will) stomp on logs from inside
+ // the sandbox.
+ for _, f := range []*os.File{os.Stdout, os.Stderr} {
+ flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
+ if err != nil {
+ fatalf("error getting file flags for %v: %v", f, err)
+ }
+ if flags&unix.O_APPEND == 0 {
+ flags |= unix.O_APPEND
+ if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {
+ fatalf("error setting file flags for %v: %v", f, err)
+ }
+ }
+ }
+
+ // Resolve the absolute path for the binary.
+ testBin, err := filepath.Abs(flag.Args()[0])
+ if err != nil {
+ fatalf("Abs(%q) failed: %v", flag.Args()[0], err)
+ }
+
+ // Get all test cases in each binary.
+ testCases, err := gtest.ParseTestCases(testBin, true)
+ if err != nil {
+ fatalf("ParseTestCases(%q) failed: %v", testBin, err)
+ }
+
+ // Get subset of tests corresponding to shard.
+ indices, err := testutil.TestIndicesForShard(len(testCases))
+ if err != nil {
+ fatalf("TestsForShard() failed: %v", err)
+ }
+ if len(indices) == 0 {
+ log.Warningf("No tests to run in this shard")
+ return
+ }
+ args := gtest.BuildTestArgs(indices, testCases)
+
+ switch *platform {
+ case "native":
+ if err := runTestCaseNative(testBin, args); err != nil {
+ fatalf(err.Error())
+ }
+ default:
+ if err := runTestCaseRunsc(testBin, args); err != nil {
+ fatalf(err.Error())
+ }
+ }
+}
+
// runTestCaseNative runs the test case directly on the host machine.
-func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {
+func runTestCaseNative(testBin string, args []string) error {
// These tests might be running in parallel, so make sure they have a
// unique test temp dir.
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
if err != nil {
- t.Fatalf("could not create temp dir: %v", err)
+ return fmt.Errorf("could not create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
@@ -84,12 +152,12 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {
}
// Remove shard env variables so that the gunit binary does not try to
// interpret them.
- env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})
+ env = filterEnv(env, "TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS")
if *addUDSTree {
socketDir, cleanup, err := uds.CreateSocketTree("/tmp")
if err != nil {
- t.Fatalf("failed to create socket tree: %v", err)
+ return fmt.Errorf("failed to create socket tree: %v", err)
}
defer cleanup()
@@ -99,24 +167,25 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {
env = append(env, "TEST_UDS_ATTACH_TREE="+socketDir)
}
- cmd := exec.Command(testBin, tc.Args()...)
+ cmd := exec.Command(testBin, args...)
cmd.Env = env
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
- cmd.SysProcAttr = &syscall.SysProcAttr{}
+ cmd.SysProcAttr = &unix.SysProcAttr{}
if specutils.HasCapabilities(capability.CAP_SYS_ADMIN) {
- cmd.SysProcAttr.Cloneflags |= syscall.CLONE_NEWUTS
+ cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWUTS
}
if specutils.HasCapabilities(capability.CAP_NET_ADMIN) {
- cmd.SysProcAttr.Cloneflags |= syscall.CLONE_NEWNET
+ cmd.SysProcAttr.Cloneflags |= unix.CLONE_NEWNET
}
if err := cmd.Run(); err != nil {
ws := err.(*exec.ExitError).Sys().(syscall.WaitStatus)
- t.Errorf("test %q exited with status %d, want 0", tc.FullName(), ws.ExitStatus())
+ return fmt.Errorf("test exited with status %d, want 0", ws.ExitStatus())
}
+ return nil
}
// runRunsc runs spec in runsc in a standard test configuration.
@@ -124,7 +193,7 @@ func runTestCaseNative(testBin string, tc gtest.TestCase, t *testing.T) {
// runsc logs will be saved to a path in TEST_UNDECLARED_OUTPUTS_DIR.
//
// Returns an error if the sandboxed application exits non-zero.
-func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
+func runRunsc(spec *specs.Spec) error {
bundleDir, cleanup, err := testutil.SetupBundleDir(spec)
if err != nil {
return fmt.Errorf("SetupBundleDir failed: %v", err)
@@ -137,9 +206,8 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
}
defer cleanup()
- name := tc.FullName()
id := testutil.RandomContainerID()
- log.Infof("Running test %q in container %q", name, id)
+ log.Infof("Running test in container %q", id)
specutils.LogSpec(spec)
args := []string{
@@ -148,7 +216,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
"-log-format=text",
"-TESTONLY-unsafe-nonroot=true",
"-net-raw=true",
- fmt.Sprintf("-panic-signal=%d", syscall.SIGTERM),
+ fmt.Sprintf("-panic-signal=%d", unix.SIGTERM),
"-watchdog-action=panic",
"-platform", *platform,
"-file-access", *fileAccess,
@@ -175,13 +243,8 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
args = append(args, "-ref-leak-mode=log-names")
}
- testLogDir := ""
- if undeclaredOutputsDir, ok := syscall.Getenv("TEST_UNDECLARED_OUTPUTS_DIR"); ok {
- // Create log directory dedicated for this test.
- testLogDir = filepath.Join(undeclaredOutputsDir, strings.Replace(name, "/", "_", -1))
- if err := os.MkdirAll(testLogDir, 0755); err != nil {
- return fmt.Errorf("could not create test dir: %v", err)
- }
+ testLogDir := os.Getenv("TEST_UNDECLARED_OUTPUTS_DIR")
+ if len(testLogDir) > 0 {
debugLogDir, err := ioutil.TempDir(testLogDir, "runsc")
if err != nil {
return fmt.Errorf("could not create temp dir: %v", err)
@@ -200,8 +263,8 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
// as root inside that namespace to get it.
rArgs := append(args, "run", "--bundle", bundleDir, id)
cmd := exec.Command(*runscPath, rArgs...)
- cmd.SysProcAttr = &syscall.SysProcAttr{
- Cloneflags: syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS,
+ cmd.SysProcAttr = &unix.SysProcAttr{
+ Cloneflags: unix.CLONE_NEWUSER | unix.CLONE_NEWNS,
// Set current user/group as root inside the namespace.
UidMappings: []syscall.SysProcIDMap{
{ContainerID: 0, HostID: os.Getuid(), Size: 1},
@@ -219,14 +282,14 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
cmd.Stderr = os.Stderr
sig := make(chan os.Signal, 1)
defer close(sig)
- signal.Notify(sig, syscall.SIGTERM)
+ signal.Notify(sig, unix.SIGTERM)
defer signal.Stop(sig)
go func() {
s, ok := <-sig
if !ok {
return
}
- log.Warningf("%s: Got signal: %v", name, s)
+ log.Warningf("Got signal: %v", s)
done := make(chan bool, 1)
dArgs := append([]string{}, args...)
dArgs = append(dArgs, "-alsologtostderr=true", "debug", "--stacks", id)
@@ -247,7 +310,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
log.Warningf("Send SIGTERM to the sandbox process")
dArgs = append(args, "debug",
- fmt.Sprintf("--signal=%d", syscall.SIGTERM),
+ fmt.Sprintf("--signal=%d", unix.SIGTERM),
id)
signal := exec.Command(*runscPath, dArgs...)
signal.Stdout = os.Stdout
@@ -259,7 +322,7 @@ func runRunsc(tc gtest.TestCase, spec *specs.Spec) error {
if err == nil && len(testLogDir) > 0 {
// If the test passed, then we erase the log directory. This speeds up
// uploading logs in continuous integration & saves on disk space.
- os.RemoveAll(testLogDir)
+ _ = os.RemoveAll(testLogDir)
}
return err
@@ -314,10 +377,10 @@ func setupUDSTree(spec *specs.Spec) (cleanup func(), err error) {
}
// runsTestCaseRunsc runs the test case in runsc.
-func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
+func runTestCaseRunsc(testBin string, args []string) error {
// Run a new container with the test executable and filter for the
// given test suite and name.
- spec := testutil.NewSpecWithArgs(append([]string{testBin}, tc.Args()...)...)
+ spec := testutil.NewSpecWithArgs(append([]string{testBin}, args...)...)
// Mark the root as writeable, as some tests attempt to
// write to the rootfs, and expect EACCES, not EROFS.
@@ -343,12 +406,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
// users, so make sure it is world-accessible.
tmpDir, err := ioutil.TempDir(testutil.TmpDir(), "")
if err != nil {
- t.Fatalf("could not create temp dir: %v", err)
+ return fmt.Errorf("could not create temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
if err := os.Chmod(tmpDir, 0777); err != nil {
- t.Fatalf("could not chmod temp dir: %v", err)
+ return fmt.Errorf("could not chmod temp dir: %v", err)
}
// "/tmp" is not replaced with a tmpfs mount inside the sandbox
@@ -368,13 +431,12 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
// Set environment variables that indicate we are running in gVisor with
// the given platform, network, and filesystem stack.
- platformVar := "TEST_ON_GVISOR"
- networkVar := "GVISOR_NETWORK"
- env := append(os.Environ(), platformVar+"="+*platform, networkVar+"="+*network)
- vfsVar := "GVISOR_VFS"
+ env := []string{"TEST_ON_GVISOR=" + *platform, "GVISOR_NETWORK=" + *network}
+ env = append(env, os.Environ()...)
+ const vfsVar = "GVISOR_VFS"
if *vfs2 {
env = append(env, vfsVar+"=VFS2")
- fuseVar := "FUSE_ENABLED"
+ const fuseVar = "FUSE_ENABLED"
if *fuse {
env = append(env, fuseVar+"=TRUE")
} else {
@@ -386,11 +448,11 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
// Remove shard env variables so that the gunit binary does not try to
// interpret them.
- env = filterEnv(env, []string{"TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS"})
+ env = filterEnv(env, "TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS", "GTEST_SHARD_INDEX", "GTEST_TOTAL_SHARDS")
// Set TEST_TMPDIR to /tmp, as some of the syscall tests require it to
// be backed by tmpfs.
- env = filterEnv(env, []string{"TEST_TMPDIR"})
+ env = filterEnv(env, "TEST_TMPDIR")
env = append(env, fmt.Sprintf("TEST_TMPDIR=%s", testTmpDir))
spec.Process.Env = env
@@ -398,18 +460,19 @@ func runTestCaseRunsc(testBin string, tc gtest.TestCase, t *testing.T) {
if *addUDSTree {
cleanup, err := setupUDSTree(spec)
if err != nil {
- t.Fatalf("error creating UDS tree: %v", err)
+ return fmt.Errorf("error creating UDS tree: %v", err)
}
defer cleanup()
}
- if err := runRunsc(tc, spec); err != nil {
- t.Errorf("test %q failed with error %v, want nil", tc.FullName(), err)
+ if err := runRunsc(spec); err != nil {
+ return fmt.Errorf("test failed with error %v, want nil", err)
}
+ return nil
}
// filterEnv returns an environment with the excluded variables removed.
-func filterEnv(env, exclude []string) []string {
+func filterEnv(env []string, exclude ...string) []string {
var out []string
for _, kv := range env {
ok := true
@@ -430,82 +493,3 @@ func fatalf(s string, args ...interface{}) {
fmt.Fprintf(os.Stderr, s+"\n", args...)
os.Exit(1)
}
-
-func matchString(a, b string) (bool, error) {
- return a == b, nil
-}
-
-func main() {
- flag.Parse()
- if flag.NArg() != 1 {
- fatalf("test must be provided")
- }
- testBin := flag.Args()[0] // Only argument.
-
- log.SetLevel(log.Info)
- if *debug {
- log.SetLevel(log.Debug)
- }
-
- if *platform != "native" && *runscPath == "" {
- if err := testutil.ConfigureExePath(); err != nil {
- panic(err.Error())
- }
- *runscPath = specutils.ExePath
- }
-
- // Make sure stdout and stderr are opened with O_APPEND, otherwise logs
- // from outside the sandbox can (and will) stomp on logs from inside
- // the sandbox.
- for _, f := range []*os.File{os.Stdout, os.Stderr} {
- flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
- if err != nil {
- fatalf("error getting file flags for %v: %v", f, err)
- }
- if flags&unix.O_APPEND == 0 {
- flags |= unix.O_APPEND
- if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags); err != nil {
- fatalf("error setting file flags for %v: %v", f, err)
- }
- }
- }
-
- // Get all test cases in each binary.
- testCases, err := gtest.ParseTestCases(testBin, true)
- if err != nil {
- fatalf("ParseTestCases(%q) failed: %v", testBin, err)
- }
-
- // Get subset of tests corresponding to shard.
- indices, err := testutil.TestIndicesForShard(len(testCases))
- if err != nil {
- fatalf("TestsForShard() failed: %v", err)
- }
-
- // Resolve the absolute path for the binary.
- testBin, err = filepath.Abs(testBin)
- if err != nil {
- fatalf("Abs() failed: %v", err)
- }
-
- // Run the tests.
- var tests []testing.InternalTest
- for _, tci := range indices {
- // Capture tc.
- tc := testCases[tci]
- tests = append(tests, testing.InternalTest{
- Name: fmt.Sprintf("%s_%s", tc.Suite, tc.Name),
- F: func(t *testing.T) {
- if *platform == "native" {
- // Run the test case on host.
- runTestCaseNative(testBin, tc, t)
- } else {
- // Run the test case in runsc.
- runTestCaseRunsc(testBin, tc, t)
- }
- },
- })
- }
-
- testing.Main(matchString, tests, nil, nil)
-}
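The runner now passes excluded variable names to filterEnv variadically. A minimal standalone version of that helper's shape, with the prefix matching spelled out; the exact matching logic in the real helper may differ:

package main

import (
	"fmt"
	"strings"
)

// filterEnv returns env with any variable named in exclude removed.
func filterEnv(env []string, exclude ...string) []string {
	var out []string
	for _, kv := range env {
		keep := true
		for _, k := range exclude {
			if strings.HasPrefix(kv, k+"=") {
				keep = false
				break
			}
		}
		if keep {
			out = append(out, kv)
		}
	}
	return out
}

func main() {
	env := []string{"PATH=/bin", "TEST_SHARD_INDEX=3", "HOME=/root"}
	fmt.Println(filterEnv(env, "TEST_SHARD_INDEX", "TEST_TOTAL_SHARDS"))
	// [PATH=/bin HOME=/root]
}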
diff --git a/test/runtimes/proctor/BUILD b/test/runtimes/proctor/BUILD
index fdc6d3173..b4a9b12de 100644
--- a/test/runtimes/proctor/BUILD
+++ b/test/runtimes/proctor/BUILD
@@ -7,5 +7,8 @@ go_binary(
srcs = ["main.go"],
pure = True,
visibility = ["//test/runtimes:__pkg__"],
- deps = ["//test/runtimes/proctor/lib"],
+ deps = [
+ "//test/runtimes/proctor/lib",
+ "@org_golang_x_sys//unix:go_default_library",
+ ],
)
diff --git a/test/runtimes/proctor/lib/BUILD b/test/runtimes/proctor/lib/BUILD
index 0c8367dfe..f834f1b5a 100644
--- a/test/runtimes/proctor/lib/BUILD
+++ b/test/runtimes/proctor/lib/BUILD
@@ -13,6 +13,7 @@ go_library(
"python.go",
],
visibility = ["//test/runtimes/proctor:__pkg__"],
+ deps = ["@org_golang_x_sys//unix:go_default_library"],
)
go_test(
diff --git a/test/runtimes/proctor/lib/lib.go b/test/runtimes/proctor/lib/lib.go
index f2ba82498..36c60088a 100644
--- a/test/runtimes/proctor/lib/lib.go
+++ b/test/runtimes/proctor/lib/lib.go
@@ -22,7 +22,8 @@ import (
"os/signal"
"path/filepath"
"regexp"
- "syscall"
+
+ "golang.org/x/sys/unix"
)
// TestRunner is an interface that must be implemented for each runtime
@@ -59,7 +60,7 @@ func TestRunnerForRuntime(runtime string) (TestRunner, error) {
func PauseAndReap() {
// Get notified of any new children.
ch := make(chan os.Signal, 1)
- signal.Notify(ch, syscall.SIGCHLD)
+ signal.Notify(ch, unix.SIGCHLD)
for {
if _, ok := <-ch; !ok {
@@ -69,7 +70,7 @@ func PauseAndReap() {
// Reap the child.
for {
- if cpid, _ := syscall.Wait4(-1, nil, syscall.WNOHANG, nil); cpid < 1 {
+ if cpid, _ := unix.Wait4(-1, nil, unix.WNOHANG, nil); cpid < 1 {
break
}
}
diff --git a/test/runtimes/proctor/main.go b/test/runtimes/proctor/main.go
index 81cb68381..8c076a499 100644
--- a/test/runtimes/proctor/main.go
+++ b/test/runtimes/proctor/main.go
@@ -22,8 +22,8 @@ import (
"log"
"os"
"strings"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/test/runtimes/proctor/lib"
)
@@ -42,14 +42,14 @@ func setNumFilesLimit() error {
// timeout if the NOFILE limit is too high. On gVisor, syscalls are
// slower so these tests will need even more time to pass.
const nofile = 32768
- rLimit := syscall.Rlimit{}
- err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rLimit)
+ rLimit := unix.Rlimit{}
+ err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rLimit)
if err != nil {
return fmt.Errorf("failed to get RLIMIT_NOFILE: %v", err)
}
if rLimit.Cur > nofile {
rLimit.Cur = nofile
- err := syscall.Setrlimit(syscall.RLIMIT_NOFILE, &rLimit)
+ err := unix.Setrlimit(unix.RLIMIT_NOFILE, &rLimit)
if err != nil {
return fmt.Errorf("failed to set RLIMIT_NOFILE: %v", err)
}
diff --git a/test/syscalls/BUILD b/test/syscalls/BUILD
index 9adb1cea3..ef299799e 100644
--- a/test/syscalls/BUILD
+++ b/test/syscalls/BUILD
@@ -65,14 +65,8 @@ syscall_test(
syscall_test(
size = "large",
- # Produce too many logs in the debug mode.
- debug = False,
shard_count = most_shards,
- # Takes too long for TSAN. Since this is kind of a stress test that doesn't
- # involve much concurrency, TSAN's usefulness here is limited anyway.
- tags = ["nogotsan"],
test = "//test/syscalls/linux:socket_stress_test",
- vfs2 = False,
)
syscall_test(
diff --git a/test/syscalls/linux/BUILD b/test/syscalls/linux/BUILD
index 5371f825c..5399d8106 100644
--- a/test/syscalls/linux/BUILD
+++ b/test/syscalls/linux/BUILD
@@ -2330,13 +2330,15 @@ cc_binary(
],
linkstatic = 1,
deps = [
+ gtest,
":ip_socket_test_util",
":socket_test_util",
- "@com_google_absl//absl/strings",
- gtest,
+ "//test/util:file_descriptor",
"//test/util:test_main",
"//test/util:test_util",
"//test/util:thread_util",
+ "@com_google_absl//absl/strings",
+ "@com_google_absl//absl/time",
],
)
diff --git a/test/syscalls/linux/proc.cc b/test/syscalls/linux/proc.cc
index e508ce27f..61a421788 100644
--- a/test/syscalls/linux/proc.cc
+++ b/test/syscalls/linux/proc.cc
@@ -2162,7 +2162,13 @@ class BlockingChild {
return tid_;
}
- void Join() { Stop(); }
+ void Join() {
+ {
+ absl::MutexLock ml(&mu_);
+ stop_ = true;
+ }
+ thread_.Join();
+ }
private:
void Start() {
@@ -2172,11 +2178,6 @@ class BlockingChild {
mu_.Await(absl::Condition(&stop_));
}
- void Stop() {
- absl::MutexLock ml(&mu_);
- stop_ = true;
- }
-
mutable absl::Mutex mu_;
bool stop_ ABSL_GUARDED_BY(mu_) = false;
pid_t tid_;
@@ -2190,16 +2191,18 @@ class BlockingChild {
TEST(ProcTask, NewThreadAppears) {
auto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/proc/self/task", false));
BlockingChild child1;
- EXPECT_NO_ERRNO(DirContainsExactly("/proc/self/task",
- TaskFiles(initial, {child1.Tid()})));
+ // Use Eventually* in case a process from an earlier test is still tearing down.
+ EXPECT_NO_ERRNO(EventuallyDirContainsExactly(
+ "/proc/self/task", TaskFiles(initial, {child1.Tid()})));
}
TEST(ProcTask, KilledThreadsDisappear) {
auto initial = ASSERT_NO_ERRNO_AND_VALUE(ListDir("/proc/self/task/", false));
BlockingChild child1;
- EXPECT_NO_ERRNO(DirContainsExactly("/proc/self/task",
- TaskFiles(initial, {child1.Tid()})));
+ // Use Eventually* in case a process from an earlier test is still tearing down.
+ EXPECT_NO_ERRNO(EventuallyDirContainsExactly(
+ "/proc/self/task", TaskFiles(initial, {child1.Tid()})));
// Stat child1's task file. Regression test for b/32097707.
struct stat statbuf;
diff --git a/test/syscalls/linux/proc_net.cc b/test/syscalls/linux/proc_net.cc
index 73140b2e9..20f1dc305 100644
--- a/test/syscalls/linux/proc_net.cc
+++ b/test/syscalls/linux/proc_net.cc
@@ -40,6 +40,7 @@ namespace {
constexpr const char kProcNet[] = "/proc/net";
constexpr const char kIpForward[] = "/proc/sys/net/ipv4/ip_forward";
+constexpr const char kRangeFile[] = "/proc/sys/net/ipv4/ip_local_port_range";
TEST(ProcNetSymlinkTarget, FileMode) {
struct stat s;
@@ -562,6 +563,42 @@ TEST(ProcSysNetIpv4IpForward, CanReadAndWrite) {
EXPECT_EQ(buf, to_write);
}
+TEST(ProcSysNetPortRange, CanReadAndWrite) {
+ int min;
+ int max;
+ std::string rangefile = ASSERT_NO_ERRNO_AND_VALUE(GetContents(kRangeFile));
+ ASSERT_EQ(rangefile.back(), '\n');
+ rangefile.pop_back();
+ std::vector<std::string> range =
+ absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
+ ASSERT_GT(range.size(), 1);
+ ASSERT_TRUE(absl::SimpleAtoi(range.front(), &min));
+ ASSERT_TRUE(absl::SimpleAtoi(range.back(), &max));
+ EXPECT_LE(min, max);
+
+ // If the file isn't writable, there's nothing else to do here.
+ if (access(kRangeFile, W_OK)) {
+ return;
+ }
+
+ constexpr int kSize = 77;
+ FileDescriptor fd =
+ ASSERT_NO_ERRNO_AND_VALUE(Open(kRangeFile, O_WRONLY | O_TRUNC, 0));
+ max = min + kSize;
+ const std::string small_range = absl::StrFormat("%d %d", min, max);
+ ASSERT_THAT(write(fd.get(), small_range.c_str(), small_range.size()),
+ SyscallSucceedsWithValue(small_range.size()));
+
+ rangefile = ASSERT_NO_ERRNO_AND_VALUE(GetContents(kRangeFile));
+ ASSERT_EQ(rangefile.back(), '\n');
+ rangefile.pop_back();
+ range = absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
+ ASSERT_GT(range.size(), 1);
+ ASSERT_TRUE(absl::SimpleAtoi(range.front(), &min));
+ ASSERT_TRUE(absl::SimpleAtoi(range.back(), &max));
+ EXPECT_EQ(min + kSize, max);
+}
+
} // namespace
} // namespace testing
} // namespace gvisor
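Both this test and the stress tests below parse /proc/sys/net/ipv4/ip_local_port_range, which holds the minimum and maximum ephemeral ports separated by whitespace with a trailing newline. A short Go sketch of the same parsing, independent of the C++ test utilities:

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// readPortRange parses a file with the layout of
// /proc/sys/net/ipv4/ip_local_port_range: two integers (min, max) separated
// by whitespace, followed by a newline.
func readPortRange(path string) (min, max int, err error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return 0, 0, err
	}
	fields := strings.Fields(string(data))
	if len(fields) != 2 {
		return 0, 0, fmt.Errorf("%s has invalid content: %q", path, data)
	}
	if min, err = strconv.Atoi(fields[0]); err != nil {
		return 0, 0, err
	}
	if max, err = strconv.Atoi(fields[1]); err != nil {
		return 0, 0, err
	}
	return min, max, nil
}

func main() {
	min, max, err := readPortRange("/proc/sys/net/ipv4/ip_local_port_range")
	fmt.Println(min, max, err)
}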
diff --git a/test/syscalls/linux/pty.cc b/test/syscalls/linux/pty.cc
index 294b9f6fd..8d15c491e 100644
--- a/test/syscalls/linux/pty.cc
+++ b/test/syscalls/linux/pty.cc
@@ -1255,8 +1255,11 @@ TEST_F(PtyTest, PartialBadBuffer) {
// Read from the replica into bad_buffer.
ASSERT_NO_ERRNO(WaitUntilReceived(replica_.get(), size));
- EXPECT_THAT(ReadFd(replica_.get(), bad_buffer, size),
- SyscallFailsWithErrno(EFAULT));
+ // Before Linux 3b830a9c this returned EFAULT, but after that commit it
+ // returns EAGAIN.
+ EXPECT_THAT(
+ ReadFd(replica_.get(), bad_buffer, size),
+ AnyOf(SyscallFailsWithErrno(EFAULT), SyscallFailsWithErrno(EAGAIN)));
EXPECT_THAT(munmap(addr, 2 * kPageSize), SyscallSucceeds()) << addr;
}
diff --git a/test/syscalls/linux/socket_generic_stress.cc b/test/syscalls/linux/socket_generic_stress.cc
index 679586530..c35aa2183 100644
--- a/test/syscalls/linux/socket_generic_stress.cc
+++ b/test/syscalls/linux/socket_generic_stress.cc
@@ -17,29 +17,72 @@
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>
+#include <unistd.h>
#include <array>
#include <string>
#include "gtest/gtest.h"
+#include "absl/strings/numbers.h"
+#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
#include "test/syscalls/linux/ip_socket_test_util.h"
#include "test/syscalls/linux/socket_test_util.h"
+#include "test/util/file_descriptor.h"
#include "test/util/test_util.h"
#include "test/util/thread_util.h"
namespace gvisor {
namespace testing {
+constexpr char kRangeFile[] = "/proc/sys/net/ipv4/ip_local_port_range";
+
+PosixErrorOr<int> NumPorts() {
+ int min = 0;
+ int max = 1 << 16;
+
+ // Read the ephemeral range from /proc.
+ ASSIGN_OR_RETURN_ERRNO(std::string rangefile, GetContents(kRangeFile));
+ const std::string err_msg =
+ absl::StrFormat("%s has invalid content: %s", kRangeFile, rangefile);
+ if (rangefile.back() != '\n') {
+ return PosixError(EINVAL, err_msg);
+ }
+ rangefile.pop_back();
+ std::vector<std::string> range =
+ absl::StrSplit(rangefile, absl::ByAnyChar("\t "));
+ if (range.size() < 2 || !absl::SimpleAtoi(range.front(), &min) ||
+ !absl::SimpleAtoi(range.back(), &max)) {
+ return PosixError(EINVAL, err_msg);
+ }
+
+ // If we can open as writable, limit the range.
+ if (!access(kRangeFile, W_OK)) {
+ ASSIGN_OR_RETURN_ERRNO(FileDescriptor fd,
+ Open(kRangeFile, O_WRONLY | O_TRUNC, 0));
+ max = min + 50;
+ const std::string small_range = absl::StrFormat("%d %d", min, max);
+ int n = write(fd.get(), small_range.c_str(), small_range.size());
+ if (n < 0) {
+ return PosixError(
+ errno,
+ absl::StrFormat("write(%d [%s], \"%s\", %d)", fd.get(), kRangeFile,
+ small_range.c_str(), small_range.size()));
+ }
+ }
+ return max - min;
+}
+
// Test fixture for tests that apply to pairs of connected sockets.
using ConnectStressTest = SocketPairTest;
-TEST_P(ConnectStressTest, Reset65kTimes) {
- // TODO(b/165912341): These are too slow on KVM platform with nested virt.
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
-
- for (int i = 0; i < 1 << 16; ++i) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
+TEST_P(ConnectStressTest, Reset) {
+ const int nports = ASSERT_NO_ERRNO_AND_VALUE(NumPorts());
+ for (int i = 0; i < nports * 2; i++) {
+ const std::unique_ptr<SocketPair> sockets =
+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
// Send some data to ensure that the connection gets reset and the port gets
// released immediately. This avoids either end entering TIME-WAIT.
@@ -57,6 +100,24 @@ TEST_P(ConnectStressTest, Reset65kTimes) {
}
}
+// Tests that opening too many connections -- without closing them -- does lead
+// to port exhaustion.
+TEST_P(ConnectStressTest, TooManyOpen) {
+ const int nports = ASSERT_NO_ERRNO_AND_VALUE(NumPorts());
+ int err_num = 0;
+ std::vector<std::unique_ptr<SocketPair>> sockets =
+ std::vector<std::unique_ptr<SocketPair>>(nports);
+ for (int i = 0; i < nports * 2; i++) {
+ PosixErrorOr<std::unique_ptr<SocketPair>> socks = NewSocketPair();
+ if (!socks.ok()) {
+ err_num = socks.error().errno_value();
+ break;
+ }
+ sockets.push_back(std::move(socks).ValueOrDie());
+ }
+ ASSERT_EQ(err_num, EADDRINUSE);
+}
+
INSTANTIATE_TEST_SUITE_P(
AllConnectedSockets, ConnectStressTest,
::testing::Values(IPv6UDPBidirectionalBindSocketPair(0),
@@ -73,14 +134,40 @@ INSTANTIATE_TEST_SUITE_P(
// Test fixture for tests that apply to pairs of connected sockets created with
// a persistent listener (if applicable).
-using PersistentListenerConnectStressTest = SocketPairTest;
+class PersistentListenerConnectStressTest : public SocketPairTest {
+ protected:
+ PersistentListenerConnectStressTest() : slept_{false} {}
-TEST_P(PersistentListenerConnectStressTest, 65kTimesShutdownCloseFirst) {
- // TODO(b/165912341): These are too slow on KVM platform with nested virt.
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
+ // NewSocketSleep is the same as NewSocketPair, but will sleep once (over the
+ // lifetime of the fixture) and retry if creation fails due to EADDRNOTAVAIL.
+ PosixErrorOr<std::unique_ptr<SocketPair>> NewSocketSleep() {
+ // We can't reuse a connection too close in time to its last use, as TCP
+ // uses the timestamp difference to disambiguate connections. With a
+ // sufficiently small port range, we'll cycle through too quickly, and TCP
+ // won't allow for connection reuse. Thus, we sleep the first time
+ // encountering EADDRNOTAVAIL to allow for that difference (1 second in
+ // gVisor).
+ PosixErrorOr<std::unique_ptr<SocketPair>> socks = NewSocketPair();
+ if (socks.ok()) {
+ return socks;
+ }
+ if (!slept_ && socks.error().errno_value() == EADDRNOTAVAIL) {
+ absl::SleepFor(absl::Milliseconds(1500));
+ slept_ = true;
+ return NewSocketPair();
+ }
+ return socks;
+ }
- for (int i = 0; i < 1 << 16; ++i) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
+ private:
+ bool slept_;
+};
+
+TEST_P(PersistentListenerConnectStressTest, ShutdownCloseFirst) {
+ const int nports = ASSERT_NO_ERRNO_AND_VALUE(NumPorts());
+ for (int i = 0; i < nports * 2; i++) {
+ std::unique_ptr<SocketPair> sockets =
+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketSleep());
ASSERT_THAT(shutdown(sockets->first_fd(), SHUT_RDWR), SyscallSucceeds());
if (GetParam().type == SOCK_STREAM) {
// Poll the other FD to make sure that we see the FIN from the other
@@ -97,12 +184,11 @@ TEST_P(PersistentListenerConnectStressTest, 65kTimesShutdownCloseFirst) {
}
}
-TEST_P(PersistentListenerConnectStressTest, 65kTimesShutdownCloseSecond) {
- // TODO(b/165912341): These are too slow on KVM platform with nested virt.
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
-
- for (int i = 0; i < 1 << 16; ++i) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
+TEST_P(PersistentListenerConnectStressTest, ShutdownCloseSecond) {
+ const int nports = ASSERT_NO_ERRNO_AND_VALUE(NumPorts());
+ for (int i = 0; i < nports * 2; i++) {
+ const std::unique_ptr<SocketPair> sockets =
+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
ASSERT_THAT(shutdown(sockets->second_fd(), SHUT_RDWR), SyscallSucceeds());
if (GetParam().type == SOCK_STREAM) {
// Poll the other FD to make sure that we see the FIN from the other
@@ -119,12 +205,11 @@ TEST_P(PersistentListenerConnectStressTest, 65kTimesShutdownCloseSecond) {
}
}
-TEST_P(PersistentListenerConnectStressTest, 65kTimesClose) {
- // TODO(b/165912341): These are too slow on KVM platform with nested virt.
- SKIP_IF(GvisorPlatform() == Platform::kKVM);
-
- for (int i = 0; i < 1 << 16; ++i) {
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
+TEST_P(PersistentListenerConnectStressTest, Close) {
+ const int nports = ASSERT_NO_ERRNO_AND_VALUE(NumPorts());
+ for (int i = 0; i < nports * 2; i++) {
+ std::unique_ptr<SocketPair> sockets =
+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketSleep());
}
}
@@ -149,7 +234,8 @@ TEST_P(DataTransferStressTest, BigDataTransfer) {
// TODO(b/165912341): These are too slow on KVM platform with nested virt.
SKIP_IF(GvisorPlatform() == Platform::kKVM);
- auto sockets = ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
+ const std::unique_ptr<SocketPair> sockets =
+ ASSERT_NO_ERRNO_AND_VALUE(NewSocketPair());
int client_fd = sockets->first_fd();
int server_fd = sockets->second_fd();
diff --git a/test/syscalls/linux/socket_inet_loopback.cc b/test/syscalls/linux/socket_inet_loopback.cc
index 344a5a22c..54b45b075 100644
--- a/test/syscalls/linux/socket_inet_loopback.cc
+++ b/test/syscalls/linux/socket_inet_loopback.cc
@@ -705,12 +705,6 @@ TEST_P(SocketInetLoopbackTest, TCPFinWait2Test_NoRandomSave) {
ds.reset();
- if (!IsRunningOnGvisor()) {
- ASSERT_THAT(
- bind(conn_fd2.get(), reinterpret_cast<sockaddr*>(&conn_bound_addr),
- conn_addrlen),
- SyscallSucceeds());
- }
ASSERT_THAT(RetryEINTR(connect)(conn_fd2.get(),
reinterpret_cast<sockaddr*>(&conn_addr),
conn_addrlen),
diff --git a/test/uds/BUILD b/test/uds/BUILD
index 51e2c7ce8..a8f49b50c 100644
--- a/test/uds/BUILD
+++ b/test/uds/BUILD
@@ -12,5 +12,6 @@ go_library(
deps = [
"//pkg/log",
"//pkg/unet",
+ "@org_golang_x_sys//unix:go_default_library",
],
)
diff --git a/test/uds/uds.go b/test/uds/uds.go
index b714c61b0..02a4a7dee 100644
--- a/test/uds/uds.go
+++ b/test/uds/uds.go
@@ -21,8 +21,8 @@ import (
"io/ioutil"
"os"
"path/filepath"
- "syscall"
+ "golang.org/x/sys/unix"
"gvisor.dev/gvisor/pkg/log"
"gvisor.dev/gvisor/pkg/unet"
)
@@ -31,16 +31,16 @@ import (
//
// Only works for stream, seqpacket sockets.
func createEchoSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := syscall.Socket(syscall.AF_UNIX, protocol, 0)
+ fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
if err != nil {
return nil, fmt.Errorf("error creating echo(%d) socket: %v", protocol, err)
}
- if err := syscall.Bind(fd, &syscall.SockaddrUnix{Name: path}); err != nil {
+ if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
return nil, fmt.Errorf("error binding echo(%d) socket: %v", protocol, err)
}
- if err := syscall.Listen(fd, 0); err != nil {
+ if err := unix.Listen(fd, 0); err != nil {
return nil, fmt.Errorf("error listening echo(%d) socket: %v", protocol, err)
}
@@ -97,17 +97,17 @@ func createEchoSocket(path string, protocol int) (cleanup func(), err error) {
//
// Only relevant for stream, seqpacket sockets.
func createNonListeningSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := syscall.Socket(syscall.AF_UNIX, protocol, 0)
+ fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
if err != nil {
return nil, fmt.Errorf("error creating nonlistening(%d) socket: %v", protocol, err)
}
- if err := syscall.Bind(fd, &syscall.SockaddrUnix{Name: path}); err != nil {
+ if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
return nil, fmt.Errorf("error binding nonlistening(%d) socket: %v", protocol, err)
}
cleanup = func() {
- if err := syscall.Close(fd); err != nil {
+ if err := unix.Close(fd); err != nil {
log.Warningf("Failed to close nonlistening(%d) socket: %v", protocol, err)
}
}
@@ -119,12 +119,12 @@ func createNonListeningSocket(path string, protocol int) (cleanup func(), err er
//
// Only works for dgram sockets.
func createNullSocket(path string, protocol int) (cleanup func(), err error) {
- fd, err := syscall.Socket(syscall.AF_UNIX, protocol, 0)
+ fd, err := unix.Socket(unix.AF_UNIX, protocol, 0)
if err != nil {
return nil, fmt.Errorf("error creating null(%d) socket: %v", protocol, err)
}
- if err := syscall.Bind(fd, &syscall.SockaddrUnix{Name: path}); err != nil {
+ if err := unix.Bind(fd, &unix.SockaddrUnix{Name: path}); err != nil {
return nil, fmt.Errorf("error binding null(%d) socket: %v", protocol, err)
}
@@ -174,7 +174,7 @@ func CreateSocketTree(baseDir string) (dir string, cleanup func(), err error) {
sockets map[string]socketCreator
}{
{
- protocol: syscall.SOCK_STREAM,
+ protocol: unix.SOCK_STREAM,
name: "stream",
sockets: map[string]socketCreator{
"echo": createEchoSocket,
@@ -182,7 +182,7 @@ func CreateSocketTree(baseDir string) (dir string, cleanup func(), err error) {
},
},
{
- protocol: syscall.SOCK_SEQPACKET,
+ protocol: unix.SOCK_SEQPACKET,
name: "seqpacket",
sockets: map[string]socketCreator{
"echo": createEchoSocket,
@@ -190,7 +190,7 @@ func CreateSocketTree(baseDir string) (dir string, cleanup func(), err error) {
},
},
{
- protocol: syscall.SOCK_DGRAM,
+ protocol: unix.SOCK_DGRAM,
name: "dgram",
sockets: map[string]socketCreator{
"null": createNullSocket,
diff --git a/tools/checklocks/BUILD b/tools/checklocks/BUILD
new file mode 100644
index 000000000..7d4c63dc7
--- /dev/null
+++ b/tools/checklocks/BUILD
@@ -0,0 +1,16 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "checklocks",
+ srcs = ["checklocks.go"],
+ nogo = False,
+ visibility = ["//tools/nogo:__subpackages__"],
+ deps = [
+ "//pkg/log",
+ "@org_golang_x_tools//go/analysis:go_default_library",
+ "@org_golang_x_tools//go/analysis/passes/buildssa:go_default_library",
+ "@org_golang_x_tools//go/ssa:go_default_library",
+ ],
+)
diff --git a/tools/checklocks/README.md b/tools/checklocks/README.md
new file mode 100644
index 000000000..dfb0275ab
--- /dev/null
+++ b/tools/checklocks/README.md
@@ -0,0 +1,142 @@
+# CheckLocks Analyzer
+
+<!--* freshness: { owner: 'gvisor-eng' reviewed: '2020-10-05' } *-->
+
+Checklocks is a nogo analyzer that uses Go's static analysis tools at compile
+time to identify and flag cases where a field that is guarded by a mutex in the
+same struct is accessed without the mutex held.
+
+The analyzer relies on explicit '// +checklocks:<mutex-name>' annotations to
+identify the fields whose accesses should be checked.
+
+Individual struct members may be protected by annotations that indicate how they
+must be accessed. These annotations are of the form:
+
+```go
+type foo struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ bar int
+
+ foo int // No annotation on foo means it's not guarded by mu.
+
+ secondMu sync.Mutex
+
+ // Multiple annotations indicate that both must be held but the
+ // checker does not assert any lock ordering.
+ // +checklocks:secondMu
+ // +checklocks:mu
+ foobar int
+}
+```
+
+The checklocks annotation may also apply to functions. For example:
+
+```go
+// +checklocks:f.mu
+func (f *foo) doThingLocked() { }
+```
+
+This checks that f.mu is held by the caller at any call site, where possible.
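+
+For example, a hypothetical caller that does not hold f.mu is flagged, while a
+caller that holds it passes:
+
+```go
+func badCaller(f *foo) {
+    f.doThingLocked() // Flagged: f.mu must be held.
+}
+
+func goodCaller(f *foo) {
+    f.mu.Lock()
+    f.doThingLocked() // OK: f.mu is held.
+    f.mu.Unlock()
+}
+```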
+
+For functions that initialize structs whose fields carry annotations, the
+following annotation can be applied to the function to disable reporting by the
+lock checker. The lock checker will still track any mutexes acquired or
+released, but won't report failures for unguarded field accesses within the
+function.
+
+```go
+// +checklocksignore
+func newXXX() *X {
+...
+}
+```
+
+***The checker treats both 'sync.Mutex' and 'sync.RWMutex' identically, i.e.,
+as a sync.Mutex. It does not distinguish read locks from exclusive locks and
+treats all locks as exclusive locks***.
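+
+For example (a sketch mirroring testRWGuarded in test/test.go), a write under a
+read lock is accepted even though it would be unsafe at runtime:
+
+```go
+type rwGuarded struct {
+    rwMu sync.RWMutex
+
+    // +checklocks:rwMu
+    x int
+}
+
+func writeUnderReadLock(g *rwGuarded) {
+    g.rwMu.RLock()
+    g.x = 1 // Accepted: RLock is treated the same as Lock.
+    g.rwMu.RUnlock()
+}
+```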
+
+For the cases the checker correctly handles today, see test/test.go.
+
+The checklocks check also flags invalid annotations: those where the named
+guard is not a 'sync.Mutex' or 'sync.RWMutex', or where the named field does
+not exist at all. This prevents the annotations from becoming stale over time
+as fields are renamed, etc.
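+
+For example, an annotation like the following (a minimal sketch) is itself
+flagged, because the named guard is not a mutex field of the struct:
+
+```go
+type badExample struct {
+    mu sync.Mutex
+
+    // The guard below does not name a mutex field in badExample, so the
+    // annotation is reported as invalid.
+    // +checklocks:nonExistentMu
+    x int
+}
+```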
+
+# Currently not supported
+
+1. The analyzer does not correctly handle deferred functions. For example, the
+   following code is not correctly checked: the defer calls are never
+   evaluated, so if a lock were unlocked twice via deferred functions the
+   analyzer would not catch it.
+
+ Similarly deferred anonymous functions are not evaluated either.
+
+```go
+type A struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var a A
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ defer a.mu.Unlock()
+ a.x = 1
+}
+```
+
+1. Anonymous functions are not correctly evaluated. The analyzer does not
+   currently support specifying annotations on anonymous functions; as a
+   result, an anonymous function that accesses protected fields is flagged even
+   when the lock is held at the call site.
+
+```go
+type A struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var a A
+ f := func() { a.x = 1 } // <== This line will be flagged by the analyzer.
+ a.mu.Lock()
+ f()
+ a.mu.Unlock()
+}
+
+```
+
+# Explicitly Not Supported
+
+1. Checking for embedded mutexes as sync.Locker rather than directly as
+ 'sync.Mutex'. In other words, the checker will not track mutex Lock and
+ Unlock() methods where the mutex is behind an interface dispatch.
+
+An example that we won't handle is shown below (this will in fact fail to
+build, since the invalid annotation itself is flagged):
+
+```go
+type A struct {
+ mu sync.Locker
+
+ // +checklocks:mu
+ x int
+}
+
+func abc() {
+ var mu sync.Mutex
+ a := A{mu: &mu}
+ a.x = 1 // This won't be flagged by the checklocks checker.
+}
+
+```
+
+1. The checker will not support guards on anything other than the cases
+ described above. For example, global mutexes cannot be referred to by
+ checklocks. Only struct members can be used.
+
+2. The checker will not support checking for lock ordering violations.
diff --git a/tools/checklocks/checklocks.go b/tools/checklocks/checklocks.go
new file mode 100644
index 000000000..4ec2918f6
--- /dev/null
+++ b/tools/checklocks/checklocks.go
@@ -0,0 +1,761 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package checklocks performs lock analysis to identify and flag unprotected
+// access to fields annotated with a '// +checklocks:<mutex-name>' annotation.
+//
+// For detailed usage, refer to README.md in the same directory.
+package checklocks
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/analysis"
+ "golang.org/x/tools/go/analysis/passes/buildssa"
+ "golang.org/x/tools/go/ssa"
+ "gvisor.dev/gvisor/pkg/log"
+)
+
+const (
+ checkLocksAnnotation = "// +checklocks:"
+ checkLocksIgnore = "// +checklocksignore"
+ checkLocksFail = "// +checklocksfail"
+)
+
+// Analyzer is the main entrypoint.
+var Analyzer = &analysis.Analyzer{
+ Name: "checklocks",
+ Doc: "checks lock preconditions on functions and fields",
+ Run: run,
+ Requires: []*analysis.Analyzer{buildssa.Analyzer},
+ FactTypes: []analysis.Fact{(*lockFieldFacts)(nil), (*lockFunctionFacts)(nil)},
+}
+
+// lockFieldFacts apply on every struct field protected by a lock or that is a
+// lock.
+type lockFieldFacts struct {
+ // GuardedBy tracks the names and field numbers that guard this field.
+ GuardedBy map[string]int
+
+ // IsMutex is true if the field is of type sync.Mutex.
+ IsMutex bool
+
+ // IsRWMutex is true if the field is of type sync.RWMutex.
+ IsRWMutex bool
+
+ // FieldNumber is the number of this field in the struct.
+ FieldNumber int
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFieldFacts) AFact() {}
+
+type functionGuard struct {
+ // ParameterNumber is the index of the parameter (including the receiver)
+ // that contains the guarding mutex. This is required during SSA analysis as
+ // field names and parameter names are not available in SSA. In the example
+ // below, ParameterNumber would be 1 and FieldNumber would correspond to the
+ // field number of 'mu' within b's type.
+ //
+ // //+checklocks:b.mu
+ // func (a *A) method(b *B, c *C) {
+ // ...
+ // }
+ ParameterNumber int
+
+ // FieldNumber is the field index of the mutex in the parameter's struct
+ // type. Refer to example above for more details.
+ FieldNumber int
+}
+
+// lockFunctionFacts apply on every method.
+type lockFunctionFacts struct {
+ // GuardedBy maps the name specified in each checklocks annotation to the
+ // parameter (including the receiver) and field that guard calls to this
+ // function. For example, given the following code:
+ // ```
+ // type A struct {
+ // mu sync.Mutex
+ // a int
+ // }
+ //
+ // // +checklocks:a.mu
+ // func xyz(a *A) {..}
+ // ```
+ //
+ // '+checklocks:a.mu' will result in an entry in this map as shown below:
+ // GuardedBy: {"a.mu" => {ParameterNumber: 0, FieldNumber: 0}}
+ GuardedBy map[string]functionGuard
+}
+
+// AFact implements analysis.Fact.AFact.
+func (*lockFunctionFacts) AFact() {}
+
+type positionKey string
+
+// toPositionKey converts from a token.Position to a key we can use to track
+// failures as the position of the failure annotation is not the same as the
+// position of the actual failure (different column/offsets). Hence we ignore
+// these fields and only use the file/line numbers to track failures.
+func toPositionKey(position token.Position) positionKey {
+ return positionKey(fmt.Sprintf("%s:%d", position.Filename, position.Line))
+}
+
+type failData struct {
+ pos token.Pos
+ count int
+}
+
+func (f failData) String() string {
+ return fmt.Sprintf("pos: %d, count: %d", f.pos, f.count)
+}
+
+type passContext struct {
+ pass *analysis.Pass
+
+ // exemptions tracks functions that should be exempted from lock checking due
+ // to '// +checklocksignore' annotation.
+ exemptions map[types.Object]struct{}
+
+ failures map[positionKey]*failData
+}
+
+var (
+ mutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineMutex|Mutex)")
+ rwMutexRE = regexp.MustCompile("((.*/)|^)sync.(CrossGoroutineRWMutex|RWMutex)")
+)
+
+func (pc *passContext) extractFieldAnnotations(field *ast.Field, fieldType *types.Var) *lockFieldFacts {
+ s := fieldType.Type().String()
+ // We match with a regexp below because fieldType can be fully qualified with
+ // the package path, e.g. for the gvisor sync package mutex fields have the
+ // type "<package path>/sync/sync.Mutex".
+ switch {
+ case mutexRE.Match([]byte(s)):
+ return &lockFieldFacts{IsMutex: true}
+ case rwMutexRE.Match([]byte(s)):
+ return &lockFieldFacts{IsRWMutex: true}
+ default:
+ }
+ if field.Doc == nil {
+ return nil
+ }
+ fieldFacts := &lockFieldFacts{GuardedBy: make(map[string]int)}
+ for _, l := range field.Doc.List {
+ if strings.HasPrefix(l.Text, checkLocksAnnotation) {
+ guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation)
+ if _, ok := fieldFacts.GuardedBy[guardName]; ok {
+ pc.pass.Reportf(field.Pos(), "annotation %s specified more than once.", l.Text)
+ continue
+ }
+ fieldFacts.GuardedBy[guardName] = -1
+ }
+ }
+
+ return fieldFacts
+}
+
+func (pc *passContext) findField(v ssa.Value, fieldNumber int) types.Object {
+ structType, ok := v.Type().Underlying().(*types.Struct)
+ if !ok {
+ structType = v.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct)
+ }
+ return structType.Field(fieldNumber)
+}
+
+// findAndExportStructFacts finds any struct fields that are annotated with the
+// "// +checklocks:" annotation and exports relevant facts about the fields to
+// be used in later analysis.
+func (pc *passContext) findAndExportStructFacts(ss *ast.StructType, structType *types.Struct) {
+ type fieldRef struct {
+ fieldObj *types.Var
+ facts *lockFieldFacts
+ }
+ mutexes := make(map[string]*fieldRef)
+ rwMutexes := make(map[string]*fieldRef)
+ guardedFields := make(map[string]*fieldRef)
+ for i, field := range ss.Fields.List {
+ fieldObj := structType.Field(i)
+ fieldFacts := pc.extractFieldAnnotations(field, fieldObj)
+ if fieldFacts == nil {
+ continue
+ }
+ fieldFacts.FieldNumber = i
+
+ ref := &fieldRef{fieldObj, fieldFacts}
+ if fieldFacts.IsMutex {
+ mutexes[fieldObj.Name()] = ref
+ }
+ if fieldFacts.IsRWMutex {
+ rwMutexes[fieldObj.Name()] = ref
+ }
+ if len(fieldFacts.GuardedBy) != 0 {
+ guardedFields[fieldObj.Name()] = ref
+ }
+ }
+
+ // Export facts about all mutexes.
+ for _, f := range mutexes {
+ pc.pass.ExportObjectFact(f.fieldObj, f.facts)
+ }
+ // Export facts about all rwMutexes.
+ for _, f := range rwMutexes {
+ pc.pass.ExportObjectFact(f.fieldObj, f.facts)
+ }
+
+ // Validate that guarded fields annotations refer to actual mutexes or
+ // rwMutexes in the struct.
+ for _, gf := range guardedFields {
+ for g := range gf.facts.GuardedBy {
+ if f, ok := mutexes[g]; ok {
+ gf.facts.GuardedBy[g] = f.facts.FieldNumber
+ } else if f, ok := rwMutexes[g]; ok {
+ gf.facts.GuardedBy[g] = f.facts.FieldNumber
+ } else {
+ pc.maybeFail(gf.fieldObj.Pos(), false /* isExempted */, "invalid mutex guard, no such mutex %s in struct %s", g, structType.String())
+ continue
+ }
+ // Export guarded field fact.
+ pc.pass.ExportObjectFact(gf.fieldObj, gf.facts)
+ }
+ }
+}
+
+func (pc *passContext) findAndExportFuncFacts(d *ast.FuncDecl) {
+ log.Debugf("finding and exporting function facts\n")
+ // for each function definition, check for +checklocks:mu annotation, which
+ // means that the function must be called with that lock held.
+ fnObj := pc.pass.TypesInfo.ObjectOf(d.Name)
+ funcFacts := lockFunctionFacts{GuardedBy: make(map[string]functionGuard)}
+ var (
+ ignore bool
+ ignorePos token.Pos
+ )
+
+outerLoop:
+ for _, l := range d.Doc.List {
+ if strings.HasPrefix(l.Text, checkLocksIgnore) {
+ pc.exemptions[fnObj] = struct{}{}
+ ignore = true
+ ignorePos = l.Pos()
+ continue
+ }
+ if strings.HasPrefix(l.Text, checkLocksAnnotation) {
+ guardName := strings.TrimPrefix(l.Text, checkLocksAnnotation)
+ if _, ok := funcFacts.GuardedBy[guardName]; ok {
+ pc.pass.Reportf(l.Pos(), "annotation %s specified more than once.", l.Text)
+ continue
+ }
+
+ found := false
+ x := strings.Split(guardName, ".")
+ if len(x) != 2 {
+ pc.pass.Reportf(l.Pos(), "checklocks mutex annotation should be of the form 'a.b'")
+ continue
+ }
+ paramName, fieldName := x[0], x[1]
+ log.Debugf("paramName: %s, fieldName: %s", paramName, fieldName)
+ var paramList []*ast.Field
+ if d.Recv != nil {
+ paramList = append(paramList, d.Recv.List...)
+ }
+ if d.Type.Params != nil {
+ paramList = append(paramList, d.Type.Params.List...)
+ }
+ for paramNum, field := range paramList {
+ log.Debugf("field names: %+v", field.Names)
+ if len(field.Names) == 0 {
+ log.Debugf("skipping because parameter is unnamed", paramName)
+ continue
+ }
+ nameExists := false
+ for _, name := range field.Names {
+ if name.Name == paramName {
+ nameExists = true
+ }
+ }
+ if !nameExists {
+ log.Debugf("skipping because parameter name(s) does not match : %s", paramName)
+ continue
+ }
+ ptrType, ok := pc.pass.TypesInfo.TypeOf(field.Type).Underlying().(*types.Pointer)
+ if !ok {
+ // Since mutexes cannot be copied we only care about parameters that
+ // are pointer types when checking for guards.
+ pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer type", l.Text)
+ continue outerLoop
+ }
+
+ structType, ok := ptrType.Elem().Underlying().(*types.Struct)
+ if !ok {
+ pc.pass.Reportf(l.Pos(), "annotation %s incorrectly specified, parameter name does not refer to a pointer to a struct", l.Text)
+ continue outerLoop
+ }
+
+ for i := 0; i < structType.NumFields(); i++ {
+ if structType.Field(i).Name() == fieldName {
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(structType.Field(i), &fieldFacts)
+ if !fieldFacts.IsMutex && !fieldFacts.IsRWMutex {
+ pc.pass.Reportf(l.Pos(), "field %s of param %s is not a mutex or an rwmutex", paramName, structType.Field(i))
+ continue outerLoop
+ }
+ funcFacts.GuardedBy[guardName] = functionGuard{ParameterNumber: paramNum, FieldNumber: i}
+ found = true
+ continue outerLoop
+ }
+ }
+ if !found {
+ pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent field %s in %s", guardName, structType)
+ continue outerLoop
+ }
+ }
+ if !found {
+ pc.pass.Reportf(l.Pos(), "annotation refers to a non-existent parameter %s", paramName)
+ }
+ }
+ }
+
+ if len(funcFacts.GuardedBy) == 0 {
+ return
+ }
+ if ignore {
+ pc.pass.Reportf(ignorePos, "//+checklocksignore cannot be specified with other annotations on the function")
+ }
+ funcObj, ok := pc.pass.TypesInfo.Defs[d.Name].(*types.Func)
+ if !ok {
+ panic(fmt.Sprintf("function type information missing for %+v", d))
+ }
+ log.Debugf("export fact for d: %+v, funcObj: %+v, funcFacts: %+v\n", d, funcObj, funcFacts)
+ pc.pass.ExportObjectFact(funcObj, &funcFacts)
+}
+
+type mutexState struct {
+ // lockedMutexes is used to track which mutexes in a given struct are
+ // currently locked using the field number of the mutex as the key.
+ lockedMutexes map[int]struct{}
+}
+
+// locksHeld tracks all currently held locks.
+type locksHeld struct {
+ locks map[ssa.Value]mutexState
+}
+
+// Same returns true if the locks held by other and l are the same.
+func (l *locksHeld) Same(other *locksHeld) bool {
+ return reflect.DeepEqual(l.locks, other.locks)
+}
+
+// Copy creates a copy of all the lock state held by l.
+func (l *locksHeld) Copy() *locksHeld {
+ out := &locksHeld{locks: make(map[ssa.Value]mutexState)}
+ for ssaVal, mState := range l.locks {
+ newLM := make(map[int]struct{})
+ for k, v := range mState.lockedMutexes {
+ newLM[k] = v
+ }
+ out.locks[ssaVal] = mutexState{lockedMutexes: newLM}
+ }
+ return out
+}
+
+func isAlias(first, second ssa.Value) bool {
+ if first == second {
+ return true
+ }
+ switch x := first.(type) {
+ case *ssa.Field:
+ if y, ok := second.(*ssa.Field); ok {
+ return x.Field == y.Field && isAlias(x.X, y.X)
+ }
+ case *ssa.FieldAddr:
+ if y, ok := second.(*ssa.FieldAddr); ok {
+ return x.Field == y.Field && isAlias(x.X, y.X)
+ }
+ case *ssa.Index:
+ if y, ok := second.(*ssa.Index); ok {
+ return isAlias(x.Index, y.Index) && isAlias(x.X, y.X)
+ }
+ case *ssa.IndexAddr:
+ if y, ok := second.(*ssa.IndexAddr); ok {
+ return isAlias(x.Index, y.Index) && isAlias(x.X, y.X)
+ }
+ case *ssa.UnOp:
+ if y, ok := second.(*ssa.UnOp); ok {
+ return isAlias(x.X, y.X)
+ }
+ }
+ return false
+}
+
+// checkBasicBlocks traverses the control flow graph starting at a set of given
+// blocks and checks each instruction for allowed operations.
+//
+// funcFact holds the exported facts for the enclosing function of these basic
+// blocks.
+func (pc *passContext) checkBasicBlocks(blocks []*ssa.BasicBlock, recoverBlock *ssa.BasicBlock, fn *ssa.Function, funcFact lockFunctionFacts) {
+ if len(blocks) == 0 {
+ return
+ }
+
+ // seen tracks, for each visited basic block, the set of locks held on entry
+ // so that blocks entered with inconsistent lock state can be reported.
+ seen := make(map[*ssa.BasicBlock]*locksHeld)
+ var scan func(block *ssa.BasicBlock, parent *locksHeld)
+ scan = func(block *ssa.BasicBlock, parent *locksHeld) {
+ _, isExempted := pc.exemptions[block.Parent().Object()]
+ if oldLocksHeld, ok := seen[block]; ok {
+ if oldLocksHeld.Same(parent) {
+ return
+ }
+ pc.maybeFail(block.Instrs[0].Pos(), isExempted, "failure entering a block %+v with different sets of locks held, oldLocks: %+v, parentLocks: %+v", block, oldLocksHeld, parent)
+ return
+ }
+ seen[block] = parent
+ var lh = parent.Copy()
+ for _, inst := range block.Instrs {
+ pc.checkInstruction(inst, isExempted, lh)
+ }
+ for _, b := range block.Succs {
+ scan(b, lh)
+ }
+ }
+
+ // Initialize lh with any preconditions that require locks to be held for the
+ // method to be invoked.
+ lh := &locksHeld{locks: make(map[ssa.Value]mutexState)}
+ for _, fg := range funcFact.GuardedBy {
+ // The first is the method object itself so we skip that when looking
+ // for receiver/function parameters.
+ log.Debugf("fn: %s, fn.Operands() == %+v", fn, fn.Operands(nil))
+ r := fn.Params[fg.ParameterNumber]
+ guardObj := findField(r, fg.FieldNumber)
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ m, ok := lh.locks[r]
+ if !ok {
+ m = mutexState{lockedMutexes: make(map[int]struct{})}
+ lh.locks[r] = m
+ }
+ m.lockedMutexes[fieldFacts.FieldNumber] = struct{}{}
+ } else {
+ panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj))
+ }
+ }
+
+ // Start scanning from the first basic block.
+ scan(blocks[0], lh)
+
+ // Validate that all blocks were touched.
+ for _, b := range blocks {
+ if _, ok := seen[b]; !ok && b != recoverBlock {
+ panic(fmt.Sprintf("block %+v was not visited during checkBasicBlocks", b))
+ }
+ }
+}
+
+func (pc *passContext) checkInstruction(inst ssa.Instruction, isExempted bool, lh *locksHeld) {
+ log.Debugf("checking instruction: %s, isExempted: %t", inst, isExempted)
+ switch x := inst.(type) {
+ case *ssa.Field:
+ pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh)
+ case *ssa.FieldAddr:
+ pc.checkFieldAccess(inst, x.X, x.Field, isExempted, lh)
+ case *ssa.Call:
+ pc.checkFunctionCall(x, isExempted, lh)
+ }
+}
+
+func findField(v ssa.Value, field int) types.Object {
+ structType, ok := v.Type().Underlying().(*types.Struct)
+ if !ok {
+ ptrType, ok := v.Type().Underlying().(*types.Pointer)
+ if !ok {
+ return nil
+ }
+ structType = ptrType.Elem().Underlying().(*types.Struct)
+ }
+ return structType.Field(field)
+}
+
+func (pc *passContext) maybeFail(pos token.Pos, isExempted bool, fmtStr string, args ...interface{}) {
+ posKey := toPositionKey(pc.pass.Fset.Position(pos))
+ log.Debugf("maybeFail: pos: %d, positionKey: %s", pos, posKey)
+ if fData, ok := pc.failures[posKey]; ok {
+ fData.count--
+ if fData.count == 0 {
+ delete(pc.failures, posKey)
+ }
+ return
+ }
+ if !isExempted {
+ pc.pass.Reportf(pos, fmt.Sprintf(fmtStr, args...))
+ }
+}
+
+func (pc *passContext) checkFieldAccess(inst ssa.Instruction, structObj ssa.Value, field int, isExempted bool, lh *locksHeld) {
+ var fieldFacts lockFieldFacts
+ fieldObj := findField(structObj, field)
+ pc.pass.ImportObjectFact(fieldObj, &fieldFacts)
+ log.Debugf("fieldObj: %s, fieldFacts: %+v", fieldObj, fieldFacts)
+ for _, guardFieldNumber := range fieldFacts.GuardedBy {
+ guardObj := findField(structObj, guardFieldNumber)
+ var guardfieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &guardfieldFacts)
+ log.Debugf("guardObj: %s, guardFieldFacts: %+v", guardObj, guardfieldFacts)
+ if guardfieldFacts.IsMutex || guardfieldFacts.IsRWMutex {
+ log.Debugf("guard is a mutex")
+ m, ok := lh.locks[structObj]
+ if !ok {
+ pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name())
+ continue
+ }
+ if _, ok := m.lockedMutexes[guardfieldFacts.FieldNumber]; !ok {
+ pc.maybeFail(inst.Pos(), isExempted, "invalid field access, %s must be locked when accessing %s", guardObj.Name(), fieldObj.Name())
+ }
+ } else {
+ panic("incorrect guard that is not a mutex or an RWMutex")
+ }
+ }
+}
+
+func (pc *passContext) checkFunctionCall(call *ssa.Call, isExempted bool, lh *locksHeld) {
+ // See: https://godoc.org/golang.org/x/tools/go/ssa#CallCommon
+ //
+ // 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon represents an ordinary
+ // function call of the value in Value, which may be a *Builtin, a *Function or any
+ // other value of kind 'func'.
+ //
+ // Value may be one of:
+ // (a) a *Function, indicating a statically dispatched call
+ // to a package-level function, an anonymous function, or
+ // a method of a named type.
+ //
+ // (b) a *MakeClosure, indicating an immediately applied
+ // function literal with free variables.
+ //
+ // (c) a *Builtin, indicating a statically dispatched call
+ // to a built-in function.
+ //
+ // (d) any other value, indicating a dynamically dispatched
+ // function call.
+ fn, ok := call.Common().Value.(*ssa.Function)
+ if !ok {
+ return
+ }
+
+ if fn.Object() == nil {
+ log.Warningf("fn w/ nil Object is: %+v", fn)
+ return
+ }
+
+ // Check if the function should be called with any locks held.
+ var funcFact lockFunctionFacts
+ pc.pass.ImportObjectFact(fn.Object(), &funcFact)
+ if len(funcFact.GuardedBy) > 0 {
+ for _, fg := range funcFact.GuardedBy {
+ // The first is the method object itself so we skip that when looking
+ // for receiver/function parameters.
+ r := (*call.Value().Operands(nil)[fg.ParameterNumber+1])
+ guardObj := findField(r, fg.FieldNumber)
+ if guardObj == nil {
+ log.Infof("guardObj nil but funcFact: %+v", funcFact)
+ continue
+ }
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ heldMutexes, ok := lh.locks[r]
+ if !ok {
+ log.Debugf("fn: %s, funcFact: %+v", fn, funcFact)
+ pc.maybeFail(call.Pos(), isExempted, "invalid function call %s must be held", guardObj.Name())
+ continue
+ }
+ if _, ok := heldMutexes.lockedMutexes[fg.FieldNumber]; !ok {
+ log.Debugf("fn: %s, funcFact: %+v", fn, funcFact)
+ pc.maybeFail(call.Pos(), isExempted, "invalid function call %s must be held", guardObj.Name())
+ }
+ } else {
+ panic(fmt.Sprintf("function: %+v has an invalid guard that is not a mutex: %+v", fn, guardObj))
+ }
+ }
+ }
+
+ // Check if it's a method dispatch for something in the sync package.
+ // See: https://godoc.org/golang.org/x/tools/go/ssa#Function
+ if fn.Package() != nil && fn.Package().Pkg.Name() == "sync" && fn.Signature.Recv() != nil {
+ r, ok := call.Common().Args[0].(*ssa.FieldAddr)
+ if !ok {
+ return
+ }
+ guardObj := findField(r.X, r.Field)
+ var fieldFacts lockFieldFacts
+ pc.pass.ImportObjectFact(guardObj, &fieldFacts)
+ if fieldFacts.IsMutex || fieldFacts.IsRWMutex {
+ switch fn.Name() {
+ case "Lock", "RLock":
+ obj := r.X
+ m := mutexState{lockedMutexes: make(map[int]struct{})}
+ for k, v := range lh.locks {
+ if isAlias(r.X, k) {
+ obj = k
+ m = v
+ }
+ }
+ if _, ok := m.lockedMutexes[r.Field]; ok {
+ // Double locking a mutex that is already locked.
+ pc.maybeFail(call.Pos(), isExempted, "trying to a lock %s when already locked", guardObj.Name())
+ return
+ }
+ m.lockedMutexes[r.Field] = struct{}{}
+ lh.locks[obj] = m
+ case "Unlock", "RUnlock":
+ // Find the associated locker object.
+ var (
+ obj ssa.Value
+ m mutexState
+ )
+ for k, v := range lh.locks {
+ if isAlias(r.X, k) {
+ obj = k
+ m = v
+ break
+ }
+ }
+ if _, ok := m.lockedMutexes[r.Field]; !ok {
+ pc.maybeFail(call.Pos(), isExempted, "trying to unlock a mutex %s that is already unlocked", guardObj.Name())
+ return
+ }
+ delete(m.lockedMutexes, r.Field)
+ if len(m.lockedMutexes) == 0 {
+ delete(lh.locks, obj)
+ }
+ case "RLocker", "DowngradeLock", "TryLock", "TryRLock":
+ // We explicitly ignore these methods for now.
+ default:
+ panic(fmt.Sprintf("unexpected mutex/rwmutex method invoked: %s", fn.Name()))
+ }
+ }
+ }
+}
+
+func run(pass *analysis.Pass) (interface{}, error) {
+ pc := &passContext{
+ pass: pass,
+ exemptions: make(map[types.Object]struct{}),
+ failures: make(map[positionKey]*failData),
+ }
+
+ // Find all line failure annotations.
+ for _, f := range pass.Files {
+ for _, cg := range f.Comments {
+ for _, c := range cg.List {
+ if strings.Contains(c.Text, checkLocksFail) {
+ cnt := 1
+ if strings.Contains(c.Text, checkLocksFail+":") {
+ parts := strings.SplitAfter(c.Text, checkLocksFail+":")
+ parsedCount, err := strconv.Atoi(parts[1])
+ if err != nil {
+ pc.pass.Reportf(c.Pos(), "invalid checklocks annotation : %s", err)
+ continue
+ }
+ cnt = parsedCount
+ }
+ position := toPositionKey(pass.Fset.Position(c.Pos()))
+ pc.failures[position] = &failData{pos: c.Pos(), count: cnt}
+ }
+ }
+ }
+ }
+
+ // Find all struct declarations and export any relevant facts.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.GenDecl)
+ // A GenDecl node (generic declaration node) represents an import,
+ // constant, type or variable declaration. We only care about struct
+ // declarations so skip any declaration that doesn't declare a new type.
+ if !ok || d.Tok != token.TYPE {
+ continue
+ }
+
+ for _, gs := range d.Specs {
+ ts := gs.(*ast.TypeSpec)
+ ss, ok := ts.Type.(*ast.StructType)
+ if !ok {
+ continue
+ }
+ structType := pass.TypesInfo.TypeOf(ts.Name).Underlying().(*types.Struct)
+ pc.findAndExportStructFacts(ss, structType)
+ }
+ }
+ }
+
+ // Find all method calls and export any relevant facts.
+ for _, f := range pass.Files {
+ for _, decl := range f.Decls {
+ d, ok := decl.(*ast.FuncDecl)
+ // Ignore any non-function declarations and any functions that do not have
+ // any comments.
+ if !ok || d.Doc == nil {
+ continue
+ }
+ pc.findAndExportFuncFacts(d)
+ }
+ }
+
+ // log all known facts and all failures if debug logging is enabled.
+ allFacts := pass.AllObjectFacts()
+ for i := range allFacts {
+ log.Debugf("fact.object: %+v, fact.Fact: %+v", allFacts[i].Object, allFacts[i].Fact)
+ }
+ log.Debugf("all expected failures: %+v", pc.failures)
+
+ // Scan all code looking for invalid accesses.
+ state := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA)
+ for _, fn := range state.SrcFuncs {
+ var funcFact lockFunctionFacts
+ // Anonymous functions (closures) do not have an Object() but do show up in
+ // the SSA.
+ if obj := fn.Object(); obj != nil {
+ pc.pass.ImportObjectFact(fn.Object(), &funcFact)
+ }
+
+ log.Debugf("checking function: %s", fn)
+ var b bytes.Buffer
+ ssa.WriteFunction(&b, fn)
+ log.Debugf("function SSA: %s", b.String())
+ if fn.Recover != nil {
+ pc.checkBasicBlocks([]*ssa.BasicBlock{fn.Recover}, nil, fn, funcFact)
+ }
+ pc.checkBasicBlocks(fn.Blocks, fn.Recover, fn, funcFact)
+ }
+
+ // Scan for remaining failures we expect.
+ for _, failure := range pc.failures {
+ // We are missing expected failures; report as much as possible.
+ pass.Reportf(failure.pos, "expected %d failures", failure.count)
+ }
+
+ return nil, nil
+}
diff --git a/tools/checklocks/test/BUILD b/tools/checklocks/test/BUILD
new file mode 100644
index 000000000..b055e71d9
--- /dev/null
+++ b/tools/checklocks/test/BUILD
@@ -0,0 +1,8 @@
+load("//tools:defs.bzl", "go_library")
+
+package(licenses = ["notice"])
+
+go_library(
+ name = "test",
+ srcs = ["test.go"],
+)
diff --git a/tools/checklocks/test/test.go b/tools/checklocks/test/test.go
new file mode 100644
index 000000000..05693c183
--- /dev/null
+++ b/tools/checklocks/test/test.go
@@ -0,0 +1,362 @@
+// Copyright 2020 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package test is a test package.
+package test
+
+import (
+ "math/rand"
+ "sync"
+)
+
+type oneGuarded struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ guardedField int
+
+ unguardedField int
+}
+
+func testAccessOne() {
+ var tc oneGuarded
+ // Valid access
+ tc.mu.Lock()
+ tc.guardedField = 1
+ tc.unguardedField = 1
+ tc.mu.Unlock()
+
+ // Valid access as unguarded field is not protected by mu.
+ tc.unguardedField = 2
+
+ // Invalid access
+ tc.guardedField = 2 // +checklocksfail
+
+ // Invalid read of a guarded field.
+ x := tc.guardedField // +checklocksfail
+ _ = x
+}
+
+func testFunctionCallsNoParameters() {
+ // Couple of regular function calls with no parameters.
+ funcCallWithValidAccess()
+ funcCallWithInvalidAccess()
+}
+
+func funcCallWithValidAccess() {
+ var tc2 oneGuarded
+ // Valid tc2 access
+ tc2.mu.Lock()
+ tc2.guardedField = 1
+ tc2.mu.Unlock()
+}
+
+func funcCallWithInvalidAccess() {
+ var tc oneGuarded
+ var tc2 oneGuarded
+ // Invalid access, wrong mutex is held.
+ tc.mu.Lock()
+ tc2.guardedField = 2 // +checklocksfail
+ tc.mu.Unlock()
+}
+
+func testParameterPassing() {
+ var tc oneGuarded
+
+ // Valid call where a guardedField is passed to a function as a parameter.
+ tc.mu.Lock()
+ nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField)
+ tc.mu.Unlock()
+
+ // Invalid call where a guardedField is passed to a function as a parameter
+ // without holding locks.
+ nestedWithGuardByAddr(&tc.guardedField, &tc.unguardedField) // +checklocksfail
+
+ // Valid call where a guardedField is passed to a function as a parameter.
+ tc.mu.Lock()
+ nestedWithGuardByValue(tc.guardedField, tc.unguardedField)
+ tc.mu.Unlock()
+
+ // Invalid call where a guardedField is passed to a function as a parameter
+ // without holding locks.
+ nestedWithGuardByValue(tc.guardedField, tc.unguardedField) // +checklocksfail
+}
+
+func nestedWithGuardByAddr(guardedField, unguardedField *int) {
+ *guardedField = 4
+ *unguardedField = 5
+}
+
+func nestedWithGuardByValue(guardedField, unguardedField int) {
+ // read the fields to keep SA4009 static analyzer happy.
+ _ = guardedField
+ _ = unguardedField
+ guardedField = 4
+ unguardedField = 5
+}
+
+type twoGuarded struct {
+ mu sync.Mutex
+ // +checklocks:mu
+ guardedField1 int
+ // +checklocks:mu
+ guardedField2 int
+}
+
+type twoLocks struct {
+ mu sync.Mutex
+ secondMu sync.Mutex
+
+ // +checklocks:mu
+ guardedField1 int
+ // +checklocks:secondMu
+ guardedField2 int
+}
+
+type twoLocksDoubleGuard struct {
+ mu sync.Mutex
+ secondMu sync.Mutex
+
+ // +checklocks:mu
+ // +checklocks:secondMu
+ doubleGuardedField int
+}
+
+func testTwoLocksDoubleGuard() {
+ var tc twoLocksDoubleGuard
+
+ // Double guarded field
+ tc.mu.Lock()
+ tc.secondMu.Lock()
+ tc.doubleGuardedField = 1
+ tc.secondMu.Unlock()
+
+ // This should fail as we released the secondMu.
+ tc.doubleGuardedField = 2 // +checklocksfail
+ tc.mu.Unlock()
+
+ // This should fail as well as now we are not holding any locks.
+ //
+ // This line triggers two failures one for each mutex, hence the 2 after
+ // fail.
+ tc.doubleGuardedField = 3 // +checklocksfail:2
+}
+
+type rwGuarded struct {
+ rwMu sync.RWMutex
+
+ // +checklocks:rwMu
+ rwGuardedField int
+}
+
+func testRWGuarded() {
+ var tc rwGuarded
+
+ // Assignment w/ exclusive lock should pass.
+ tc.rwMu.Lock()
+ tc.rwGuardedField = 1
+ tc.rwMu.Unlock()
+
+ // Assignment w/ RWLock should pass as we don't differentiate between
+ // Lock/RLock.
+ tc.rwMu.RLock()
+ tc.rwGuardedField = 2
+ tc.rwMu.RUnlock()
+
+ // Assignment w/o hold Lock() should fail.
+ tc.rwGuardedField = 3 // +checklocksfail
+
+ // Reading w/o holding lock should fail.
+ x := tc.rwGuardedField + 3 // +checklocksfail
+ _ = x
+}
+
+type nestedFields struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ nestedStruct struct {
+ nested1 int
+ nested2 int
+ }
+}
+
+func testNestedStructGuards() {
+ var tc nestedFields
+ // Valid access with mu held.
+ tc.mu.Lock()
+ tc.nestedStruct.nested1 = 1
+ tc.nestedStruct.nested2 = 2
+ tc.mu.Unlock()
+
+ // Invalid access to nested1 without holding mu.
+ tc.nestedStruct.nested1 = 1 // +checklocksfail
+}
+
+type testCaseMethods struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+}
+
+func (t *testCaseMethods) Method() {
+ // Valid access
+ t.mu.Lock()
+ t.guardedField = 1
+ t.mu.Unlock()
+
+ // Invalid access.
+ t.guardedField = 2 // +checklocksfail
+}
+
+// +checklocks:t.mu
+func (t *testCaseMethods) MethodLocked(a, b, c int) {
+ t.guardedField = 3
+}
+
+// +checklocksignore
+func (t *testCaseMethods) IgnoredMethod() {
+ // Invalid access but should not fail as the function is annotated
+ // with "// +checklocksignore"
+ t.guardedField = 2
+}
+
+func testMethodCalls() {
+ var tc2 testCaseMethods
+
+ // Valid use, tc2.Method acquires lock.
+ tc2.Method()
+
+ // Valid access tc2.mu is held before calling tc2.MethodLocked.
+ tc2.mu.Lock()
+ tc2.MethodLocked(1, 2, 3)
+ tc2.mu.Unlock()
+
+ // Invalid access no locks are being held.
+ tc2.MethodLocked(4, 5, 6) // +checklocksfail
+}
+
+type noMutex struct {
+ f int
+ g int
+}
+
+func (n noMutex) method() {
+ n.f = 1
+ n.f = n.g
+}
+
+func testNoMutex() {
+ var n noMutex
+ n.method()
+}
+
+func testMultiple() {
+ var tc1, tc2, tc3 testCaseMethods
+
+ tc1.mu.Lock()
+
+ // Valid access we are holding tc1's lock.
+ tc1.guardedField = 1
+
+ // Invalid access we are not holding tc2 or tc3's lock.
+ tc2.guardedField = 2 // +checklocksfail
+ tc3.guardedField = 3 // +checklocksfail
+ tc1.mu.Unlock()
+}
+
+func testConditionalBranchingLocks() {
+ var tc2 testCaseMethods
+ x := rand.Intn(10)
+ if x%2 == 1 {
+ tc2.mu.Lock()
+ }
+ // This is invalid access as tc2.mu is not held if we never entered
+ // the if block.
+ tc2.guardedField = 1 // +checklocksfail
+
+ var tc3 testCaseMethods
+ if x%2 == 1 {
+ tc3.mu.Lock()
+ } else {
+ tc3.mu.Lock()
+ }
+ // This is valid as tc3.mu is held in if and else blocks.
+ tc3.guardedField = 1
+}
+
+type testMethodWithParams struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+}
+
+type ptrToTestMethodWithParams *testMethodWithParams
+
+// +checklocks:t.mu
+// +checklocks:a.mu
+func (t *testMethodWithParams) methodLockedWithParams(a *testMethodWithParams, b *testMethodWithParams) {
+ t.guardedField = a.guardedField
+ b.guardedField = a.guardedField // +checklocksfail
+}
+
+// +checklocks:t.mu
+// +checklocks:a.mu
+// +checklocks:b.mu
+func (t *testMethodWithParams) methodLockedWithPtrType(a *testMethodWithParams, b ptrToTestMethodWithParams) {
+ t.guardedField = a.guardedField
+ b.guardedField = a.guardedField
+}
+
+// +checklocks:a.mu
+func standaloneFunctionWithGuard(a *testMethodWithParams) {
+ a.guardedField = 1
+ a.mu.Unlock()
+ a.guardedField = 1 // +checklocksfail
+}
+
+type testMethodWithEmbedded struct {
+ mu sync.Mutex
+
+ // +checklocks:mu
+ guardedField int
+ p *testMethodWithParams
+}
+
+// +checklocks:t.mu
+func (t *testMethodWithEmbedded) DoLocked() {
+ var a, b testMethodWithParams
+ t.guardedField = 1
+ a.mu.Lock()
+ b.mu.Lock()
+ t.p.methodLockedWithParams(&a, &b) // +checklocksfail
+ a.mu.Unlock()
+ b.mu.Unlock()
+}
+
+// UnsupportedLockerExample is a test that verifies that guarding a field with
+// something that is not a sync.Mutex/RWMutex results in a failure.
+type UnsupportedLockerExample struct {
+ mu sync.Locker
+
+ // +checklocks:mu
+ x int // +checklocksfail
+}
+
+func abc() {
+ var mu sync.Mutex
+ a := UnsupportedLockerExample{mu: &mu}
+ a.x = 1
+}
diff --git a/tools/go_marshal/gomarshal/generator.go b/tools/go_marshal/gomarshal/generator.go
index abd6f69ea..634abd1af 100644
--- a/tools/go_marshal/gomarshal/generator.go
+++ b/tools/go_marshal/gomarshal/generator.go
@@ -427,7 +427,7 @@ func (g *Generator) generateOne(t *marshallableType, fset *token.FileSet) *inter
// implementations type t.
func (g *Generator) generateOneTestSuite(t *marshallableType) *testGenerator {
i := newTestGenerator(t.spec, t.recv)
- i.emitTests(t.slice, t.dynamic)
+ i.emitTests(t.slice)
return i
}
@@ -488,7 +488,11 @@ func (g *Generator) Run() error {
panic(fmt.Sprintf("Generated code for '%s' referenced a non-existent import with local name '%s'. Either go-marshal needs to add an import to the generated file, or a package in an input source file has a package name differ from the final component of its path, which go-marshal doesn't know how to detect; use an import alias to work around this limitation.", impl.typeName(), name))
}
}
- ts = append(ts, g.generateOneTestSuite(t))
+ // Do not generate tests for dynamic types because they inherently
+ // violate some go_marshal requirements.
+ if !t.dynamic {
+ ts = append(ts, g.generateOneTestSuite(t))
+ }
}
}
diff --git a/tools/go_marshal/gomarshal/generator_tests.go b/tools/go_marshal/gomarshal/generator_tests.go
index ca3e15c16..6cf00843f 100644
--- a/tools/go_marshal/gomarshal/generator_tests.go
+++ b/tools/go_marshal/gomarshal/generator_tests.go
@@ -216,16 +216,12 @@ func (g *testGenerator) emitTestSizeBytesOnTypedNilPtr() {
})
}
-func (g *testGenerator) emitTests(slice *sliceAPI, isDynamic bool) {
+func (g *testGenerator) emitTests(slice *sliceAPI) {
g.emitTestNonZeroSize()
g.emitTestSuspectAlignment()
- if !isDynamic {
- // Do not test these for dynamic structs because they violate some
- // assumptions that these tests make.
- g.emitTestMarshalUnmarshalPreservesData()
- g.emitTestWriteToUnmarshalPreservesData()
- g.emitTestSizeBytesOnTypedNilPtr()
- }
+ g.emitTestMarshalUnmarshalPreservesData()
+ g.emitTestWriteToUnmarshalPreservesData()
+ g.emitTestSizeBytesOnTypedNilPtr()
if slice != nil {
g.emitTestMarshalUnmarshalSlicePreservesData(slice)
diff --git a/tools/nogo/BUILD b/tools/nogo/BUILD
index 7976c7521..5fc60d8d8 100644
--- a/tools/nogo/BUILD
+++ b/tools/nogo/BUILD
@@ -35,6 +35,7 @@ go_library(
visibility = ["//:sandbox"],
deps = [
"//tools/checkescape",
+ "//tools/checklocks",
"//tools/checkunsafe",
"@co_honnef_go_tools//staticcheck:go_default_library",
"@co_honnef_go_tools//stylecheck:go_default_library",
diff --git a/tools/nogo/analyzers.go b/tools/nogo/analyzers.go
index b919bc2f8..8b4bff3b6 100644
--- a/tools/nogo/analyzers.go
+++ b/tools/nogo/analyzers.go
@@ -47,6 +47,7 @@ import (
"honnef.co/go/tools/stylecheck"
"gvisor.dev/gvisor/tools/checkescape"
+ "gvisor.dev/gvisor/tools/checklocks"
"gvisor.dev/gvisor/tools/checkunsafe"
)
@@ -79,6 +80,7 @@ var AllAnalyzers = []*analysis.Analyzer{
unusedresult.Analyzer,
checkescape.Analyzer,
checkunsafe.Analyzer,
+ checklocks.Analyzer,
}
// EscapeAnalyzers is a list of escape-related analyzers.
diff --git a/tools/verity/BUILD b/tools/verity/BUILD
new file mode 100644
index 000000000..77d16359c
--- /dev/null
+++ b/tools/verity/BUILD
@@ -0,0 +1,15 @@
+load("//tools:defs.bzl", "go_binary")
+
+licenses(["notice"])
+
+go_binary(
+ name = "measure_tool",
+ srcs = [
+ "measure_tool.go",
+ "measure_tool_unsafe.go",
+ ],
+ pure = True,
+ deps = [
+ "//pkg/abi/linux",
+ ],
+)
diff --git a/tools/verity/measure_tool.go b/tools/verity/measure_tool.go
new file mode 100644
index 000000000..0d314ae70
--- /dev/null
+++ b/tools/verity/measure_tool.go
@@ -0,0 +1,87 @@
+// Copyright 2021 The gVisor Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This binary can be used to run a measurement of the verity file system,
+// generate the corresponding Merkle tree files, and return the root hash.
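+//
+// Example invocation (the path is hypothetical):
+//
+//	measure_tool --path=/tmp/verity_fs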
+package main
+
+import (
+ "flag"
+ "io/ioutil"
+ "log"
+ "os"
+ "syscall"
+
+ "gvisor.dev/gvisor/pkg/abi/linux"
+)
+
+var path = flag.String("path", "", "path to the verity file system.")
+
+const maxDigestSize = 64
+
+type digest struct {
+ metadata linux.DigestMetadata
+ digest [maxDigestSize]byte
+}
+
+func main() {
+ flag.Parse()
+ if *path == "" {
+ log.Fatalf("no path provided")
+ }
+ if err := enableDir(*path); err != nil {
+ log.Fatalf("Failed to enable file system %s: %v", *path, err)
+ }
+ // Print the root hash of the file system to stdout.
+ if err := measure(*path); err != nil {
+ log.Fatalf("Failed to measure file system %s: %v", *path, err)
+ }
+}
+
+// enableDir enables verity features on all the files and sub-directories within
+// path.
+func enableDir(path string) error {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return err
+ }
+ for _, file := range files {
+ if file.IsDir() {
+ // For directories, first enable its children.
+ if err := enableDir(path + "/" + file.Name()); err != nil {
+ return err
+ }
+ } else if file.Mode().IsRegular() {
+ // For regular files, open and enable verity feature.
+ f, err := os.Open(path + "/" + file.Name())
+ if err != nil {
+ return err
+ }
+ var p uintptr
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
+ return err
+ }
+ }
+ }
+ // Once all children are enabled, enable the parent directory.
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ var p uintptr
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_ENABLE_VERITY), p); err != 0 {
+ return err
+ }
+ return nil
+}
diff --git a/runsc/mitigate/mitigate_conf.go b/tools/verity/measure_tool_unsafe.go
index ee326324b..d4079be9e 100644
--- a/runsc/mitigate/mitigate_conf.go
+++ b/tools/verity/measure_tool_unsafe.go
@@ -11,27 +11,29 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
-
-package mitigate
+package main
import (
- "gvisor.dev/gvisor/runsc/flag"
-)
-
-type mitigate struct {
-}
+ "encoding/hex"
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
-// usage returns the usage string portion for the mitigate.
-func (m mitigate) usage() string { return "" }
-
-// setFlags sets additional flags for the Mitigate command.
-func (m mitigate) setFlags(f *flag.FlagSet) {}
-
-// execute performs additional parts of Execute for Mitigate.
-func (m mitigate) execute(set cpuSet, dryrun bool) error {
- return nil
-}
+ "gvisor.dev/gvisor/pkg/abi/linux"
+)
-func (m mitigate) vulnerable(other thread) bool {
- return other.isVulnerable()
+// measure prints the hash of path to stdout.
+func measure(path string) error {
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ var digest digest
+ digest.metadata.DigestSize = maxDigestSize
+ if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, uintptr(f.Fd()), uintptr(linux.FS_IOC_MEASURE_VERITY), uintptr(unsafe.Pointer(&digest))); err != 0 {
+ return err
+ }
+ fmt.Fprintf(os.Stdout, "%s\n", hex.EncodeToString(digest.digest[:digest.metadata.DigestSize]))
+ return err
}